/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"
#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
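/*
 * A worked example, assuming 4K pages: BITS_PER_BITMAP is 4096 * 8 =
 * 32768, so one bitmap entry covers 32768 allocation units (128MB with
 * a 4K sectorsize), and MAX_CACHE_BYTES_PER_GIG caps the tracking
 * overhead at 32K of memory per 1GB of block group.
 */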
static int link_free_space(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info);
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
                                               struct btrfs_path *path,
                                               u64 offset)
{
        struct btrfs_key key;
        struct btrfs_key location;
        struct btrfs_disk_key disk_key;
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
        struct inode *inode = NULL;
        int ret;

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                return ERR_PTR(ret);
        if (ret > 0) {
                btrfs_release_path(path);
                return ERR_PTR(-ENOENT);
        }

        leaf = path->nodes[0];
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        btrfs_free_space_key(leaf, header, &disk_key);
        btrfs_disk_key_to_cpu(&location, &disk_key);
        btrfs_release_path(path);

        inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
        if (!inode)
                return ERR_PTR(-ENOENT);
        if (is_bad_inode(inode)) {
                iput(inode);
                return ERR_PTR(-ENOENT);
        }

        mapping_set_gfp_mask(inode->i_mapping,
                             mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

        return inode;
}
struct inode *lookup_free_space_inode(struct btrfs_root *root,
                                      struct btrfs_block_group_cache
                                      *block_group, struct btrfs_path *path)
{
        struct inode *inode = NULL;
        u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

        spin_lock(&block_group->lock);
        if (block_group->inode)
                inode = igrab(block_group->inode);
        spin_unlock(&block_group->lock);
        if (inode)
                return inode;

        inode = __lookup_free_space_inode(root, path,
                                          block_group->key.objectid);
        if (IS_ERR(inode))
                return inode;

        spin_lock(&block_group->lock);
        if (!((BTRFS_I(inode)->flags & flags) == flags)) {
                btrfs_info(root->fs_info,
                           "Old style space inode found, converting.");
                BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
                        BTRFS_INODE_NODATACOW;
                block_group->disk_cache_state = BTRFS_DC_CLEAR;
        }

        if (!block_group->iref) {
                block_group->inode = igrab(inode);
                block_group->iref = 1;
        }
        spin_unlock(&block_group->lock);

        return inode;
}
static int __create_free_space_inode(struct btrfs_root *root,
                                     struct btrfs_trans_handle *trans,
                                     struct btrfs_path *path,
                                     u64 ino, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_disk_key disk_key;
        struct btrfs_free_space_header *header;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
        u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
        int ret;

        ret = btrfs_insert_empty_inode(trans, root, path, ino);
        if (ret)
                return ret;

        /* We inline crc's for the free disk space cache */
        if (ino != BTRFS_FREE_INO_OBJECTID)
                flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

        leaf = path->nodes[0];
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
        btrfs_item_key(leaf, &disk_key, path->slots[0]);
        memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
                             sizeof(*inode_item));
        btrfs_set_inode_generation(leaf, inode_item, trans->transid);
        btrfs_set_inode_size(leaf, inode_item, 0);
        btrfs_set_inode_nbytes(leaf, inode_item, 0);
        btrfs_set_inode_uid(leaf, inode_item, 0);
        btrfs_set_inode_gid(leaf, inode_item, 0);
        btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
        btrfs_set_inode_flags(leaf, inode_item, flags);
        btrfs_set_inode_nlink(leaf, inode_item, 1);
        btrfs_set_inode_transid(leaf, inode_item, trans->transid);
        btrfs_set_inode_block_group(leaf, inode_item, offset);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(struct btrfs_free_space_header));
        if (ret < 0) {
                btrfs_release_path(path);
                return ret;
        }
        leaf = path->nodes[0];
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
        btrfs_set_free_space_key(leaf, header, &disk_key);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        return 0;
}
int create_free_space_inode(struct btrfs_root *root,
                            struct btrfs_trans_handle *trans,
                            struct btrfs_block_group_cache *block_group,
                            struct btrfs_path *path)
{
        int ret;
        u64 ino;

        ret = btrfs_find_free_objectid(root, &ino);
        if (ret < 0)
                return ret;

        return __create_free_space_inode(root, trans, path, ino,
                                         block_group->key.objectid);
}
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
                                    struct btrfs_trans_handle *trans,
                                    struct btrfs_path *path,
                                    struct inode *inode)
{
        struct btrfs_block_rsv *rsv;
        u64 needed_bytes;
        loff_t oldsize;
        int ret = 0;

        rsv = trans->block_rsv;
        trans->block_rsv = &root->fs_info->global_block_rsv;

        /* 1 for slack space, 1 for updating the inode */
        needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
                btrfs_calc_trans_metadata_size(root, 1);

        spin_lock(&trans->block_rsv->lock);
        if (trans->block_rsv->reserved < needed_bytes) {
                spin_unlock(&trans->block_rsv->lock);
                trans->block_rsv = rsv;
                return -ENOSPC;
        }
        spin_unlock(&trans->block_rsv->lock);

        oldsize = i_size_read(inode);
        btrfs_i_size_write(inode, 0);
        truncate_pagecache(inode, oldsize, 0);

        /*
         * We don't need an orphan item because truncating the free space cache
         * will never be split across transactions.
         */
        ret = btrfs_truncate_inode_items(trans, root, inode,
                                         0, BTRFS_EXTENT_DATA_KEY);
        if (ret) {
                trans->block_rsv = rsv;
                btrfs_abort_transaction(trans, root, ret);
                return ret;
        }

        ret = btrfs_update_inode(trans, root, inode);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
        trans->block_rsv = rsv;

        return ret;
}
static int readahead_cache(struct inode *inode)
{
        struct file_ra_state *ra;
        unsigned long last_index;

        ra = kzalloc(sizeof(*ra), GFP_NOFS);
        if (!ra)
                return -ENOMEM;

        file_ra_state_init(ra, inode->i_mapping);
        last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

        page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

        kfree(ra);

        return 0;
}
struct io_ctl {
        void *cur, *orig;
        struct page *page;
        struct page **pages;
        struct btrfs_root *root;
        unsigned long size;
        int index;
        int num_pages;
        unsigned check_crcs:1;
};
static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
                       struct btrfs_root *root)
{
        memset(io_ctl, 0, sizeof(struct io_ctl));
        io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
                PAGE_CACHE_SHIFT;
        io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
                                GFP_NOFS);
        if (!io_ctl->pages)
                return -ENOMEM;
        io_ctl->root = root;
        if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
                io_ctl->check_crcs = 1;
        return 0;
}
static void io_ctl_free(struct io_ctl *io_ctl)
{
        kfree(io_ctl->pages);
}
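/*
 * A sketch of the typical io_ctl lifecycle as used by the load/write
 * paths below: io_ctl_init() sizes the page array from i_size,
 * io_ctl_prepare_pages() finds and locks the pages,
 * io_ctl_map_page()/io_ctl_unmap_page() walk them one kmap at a time,
 * and io_ctl_drop_pages() plus io_ctl_free() tear it all down.
 */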
static void io_ctl_unmap_page(struct io_ctl *io_ctl)
{
        if (io_ctl->cur) {
                kunmap(io_ctl->page);
                io_ctl->cur = NULL;
                io_ctl->orig = NULL;
        }
}
static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
        BUG_ON(io_ctl->index >= io_ctl->num_pages);
        io_ctl->page = io_ctl->pages[io_ctl->index++];
        io_ctl->cur = kmap(io_ctl->page);
        io_ctl->orig = io_ctl->cur;
        io_ctl->size = PAGE_CACHE_SIZE;
        if (clear)
                memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}
static void io_ctl_drop_pages(struct io_ctl *io_ctl)
{
        int i;

        io_ctl_unmap_page(io_ctl);

        for (i = 0; i < io_ctl->num_pages; i++) {
                if (io_ctl->pages[i]) {
                        ClearPageChecked(io_ctl->pages[i]);
                        unlock_page(io_ctl->pages[i]);
                        page_cache_release(io_ctl->pages[i]);
                }
        }
}
static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
                                int uptodate)
{
        struct page *page;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int i;

        for (i = 0; i < io_ctl->num_pages; i++) {
                page = find_or_create_page(inode->i_mapping, i, mask);
                if (!page) {
                        io_ctl_drop_pages(io_ctl);
                        return -ENOMEM;
                }
                io_ctl->pages[i] = page;
                if (uptodate && !PageUptodate(page)) {
                        btrfs_readpage(NULL, page);
                        lock_page(page);
                        if (!PageUptodate(page)) {
                                printk(KERN_ERR "btrfs: error reading free "
                                       "space cache\n");
                                io_ctl_drop_pages(io_ctl);
                                return -EIO;
                        }
                }
        }

        for (i = 0; i < io_ctl->num_pages; i++) {
                clear_page_dirty_for_io(io_ctl->pages[i]);
                set_page_extent_mapped(io_ctl->pages[i]);
        }

        return 0;
}
static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
{
        __le64 *val;

        io_ctl_map_page(io_ctl, 1);

        /*
         * Skip the csum areas.  If we don't check crcs then we just have a
         * 64bit chunk at the front of the first page.
         */
        if (io_ctl->check_crcs) {
                io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
                io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
        } else {
                io_ctl->cur += sizeof(u64);
                io_ctl->size -= sizeof(u64) * 2;
        }

        val = io_ctl->cur;
        *val = cpu_to_le64(generation);
        io_ctl->cur += sizeof(u64);
}
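/*
 * Resulting first-page layout when crcs are in use (a sketch): one u32
 * crc per cache page at the front, then the u64 generation written
 * above, then packed free space entries.  With a 2-page cache file that
 * is 8 bytes of crcs plus 8 bytes of generation before the first entry.
 */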
static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
{
        __le64 *gen;

        /*
         * Skip the crc area.  If we don't check crcs then we just have a 64bit
         * chunk at the front of the first page.
         */
        if (io_ctl->check_crcs) {
                io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
                io_ctl->size -= sizeof(u64) +
                        (sizeof(u32) * io_ctl->num_pages);
        } else {
                io_ctl->cur += sizeof(u64);
                io_ctl->size -= sizeof(u64) * 2;
        }

        gen = io_ctl->cur;
        if (le64_to_cpu(*gen) != generation) {
                printk_ratelimited(KERN_ERR "btrfs: space cache generation "
                                   "(%Lu) does not match inode (%Lu)\n", *gen,
                                   generation);
                io_ctl_unmap_page(io_ctl);
                return -EIO;
        }
        io_ctl->cur += sizeof(u64);
        return 0;
}
static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
{
        u32 *tmp;
        u32 crc = ~(u32)0;
        unsigned offset = 0;

        if (!io_ctl->check_crcs) {
                io_ctl_unmap_page(io_ctl);
                return;
        }

        if (index == 0)
                offset = sizeof(u32) * io_ctl->num_pages;

        crc = btrfs_csum_data(io_ctl->orig + offset, crc,
                              PAGE_CACHE_SIZE - offset);
        btrfs_csum_final(crc, (char *)&crc);
        io_ctl_unmap_page(io_ctl);
        tmp = kmap(io_ctl->pages[0]);
        tmp += index;
        *tmp = crc;
        kunmap(io_ctl->pages[0]);
}
static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
{
        u32 *tmp, val;
        u32 crc = ~(u32)0;
        unsigned offset = 0;

        if (!io_ctl->check_crcs) {
                io_ctl_map_page(io_ctl, 0);
                return 0;
        }

        if (index == 0)
                offset = sizeof(u32) * io_ctl->num_pages;

        tmp = kmap(io_ctl->pages[0]);
        tmp += index;
        val = *tmp;
        kunmap(io_ctl->pages[0]);

        io_ctl_map_page(io_ctl, 0);
        crc = btrfs_csum_data(io_ctl->orig + offset, crc,
                              PAGE_CACHE_SIZE - offset);
        btrfs_csum_final(crc, (char *)&crc);
        if (val != crc) {
                printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free "
                                   "space cache\n");
                io_ctl_unmap_page(io_ctl);
                return -EIO;
        }

        return 0;
}
static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
                            void *bitmap)
{
        struct btrfs_free_space_entry *entry;

        if (!io_ctl->cur)
                return -ENOSPC;

        entry = io_ctl->cur;
        entry->offset = cpu_to_le64(offset);
        entry->bytes = cpu_to_le64(bytes);
        entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
                BTRFS_FREE_SPACE_EXTENT;
        io_ctl->cur += sizeof(struct btrfs_free_space_entry);
        io_ctl->size -= sizeof(struct btrfs_free_space_entry);

        if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
                return 0;

        io_ctl_set_crc(io_ctl, io_ctl->index - 1);

        /* No more pages to map */
        if (io_ctl->index >= io_ctl->num_pages)
                return 0;

        /* map the next page */
        io_ctl_map_page(io_ctl, 1);
        return 0;
}
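/*
 * Each btrfs_free_space_entry is a packed on-disk record (u64 offset,
 * u64 bytes, u8 type = 17 bytes), so a 4K page holds roughly 240
 * entries; the size bookkeeping above decides when to crc the current
 * page and map the next one.
 */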
static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
{
        if (!io_ctl->cur)
                return -ENOSPC;

        /*
         * If we aren't at the start of the current page, unmap this one and
         * map the next one if there is any left.
         */
        if (io_ctl->cur != io_ctl->orig) {
                io_ctl_set_crc(io_ctl, io_ctl->index - 1);
                if (io_ctl->index >= io_ctl->num_pages)
                        return -ENOSPC;
                io_ctl_map_page(io_ctl, 0);
        }

        memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
        io_ctl_set_crc(io_ctl, io_ctl->index - 1);
        if (io_ctl->index < io_ctl->num_pages)
                io_ctl_map_page(io_ctl, 0);
        return 0;
}
static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
{
        /*
         * If we're not on the boundary we know we've modified the page and we
         * need to crc the page.
         */
        if (io_ctl->cur != io_ctl->orig)
                io_ctl_set_crc(io_ctl, io_ctl->index - 1);
        else
                io_ctl_unmap_page(io_ctl);

        while (io_ctl->index < io_ctl->num_pages) {
                io_ctl_map_page(io_ctl, 1);
                io_ctl_set_crc(io_ctl, io_ctl->index - 1);
        }
}
static int io_ctl_read_entry(struct io_ctl *io_ctl,
                             struct btrfs_free_space *entry, u8 *type)
{
        struct btrfs_free_space_entry *e;
        int ret;

        if (!io_ctl->cur) {
                ret = io_ctl_check_crc(io_ctl, io_ctl->index);
                if (ret)
                        return ret;
        }

        e = io_ctl->cur;
        entry->offset = le64_to_cpu(e->offset);
        entry->bytes = le64_to_cpu(e->bytes);
        *type = e->type;
        io_ctl->cur += sizeof(struct btrfs_free_space_entry);
        io_ctl->size -= sizeof(struct btrfs_free_space_entry);

        if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
                return 0;

        io_ctl_unmap_page(io_ctl);

        return 0;
}
static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
                              struct btrfs_free_space *entry)
{
        int ret;

        ret = io_ctl_check_crc(io_ctl, io_ctl->index);
        if (ret)
                return ret;

        memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
        io_ctl_unmap_page(io_ctl);

        return 0;
}
/*
 * Since we attach pinned extents after the fact we can have contiguous sections
 * of free space that are split up in entries.  This poses a problem with the
 * tree logging stuff since it could have allocated across what appears to be 2
 * entries since we would have merged the entries when adding the pinned extents
 * back to the free space cache.  So run through the space cache that we just
 * loaded and merge contiguous entries.  This will make the log replay stuff not
 * blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
        struct btrfs_free_space *e, *prev = NULL;
        struct rb_node *n;

again:
        spin_lock(&ctl->tree_lock);
        for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
                e = rb_entry(n, struct btrfs_free_space, offset_index);
                if (!prev)
                        goto next;
                if (e->bitmap || prev->bitmap)
                        goto next;
                if (prev->offset + prev->bytes == e->offset) {
                        unlink_free_space(ctl, prev);
                        unlink_free_space(ctl, e);
                        prev->bytes += e->bytes;
                        kmem_cache_free(btrfs_free_space_cachep, e);
                        link_free_space(ctl, prev);
                        prev = NULL;
                        spin_unlock(&ctl->tree_lock);
                        goto again;
                }
next:
                prev = e;
        }
        spin_unlock(&ctl->tree_lock);
}
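/*
 * Concrete example of the merge above: if [0, 4K) and [4K, 8K) were
 * loaded as separate extent entries, this pass collapses them into a
 * single [0, 8K) entry, restarting the scan after each merge since the
 * tree was modified under the lock.
 */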
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
                                   struct btrfs_free_space_ctl *ctl,
                                   struct btrfs_path *path, u64 offset)
{
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
        struct io_ctl io_ctl;
        struct btrfs_key key;
        struct btrfs_free_space *e, *n;
        struct list_head bitmaps;
        u64 num_entries;
        u64 num_bitmaps;
        u64 generation;
        u8 type;
        int ret = 0;

        INIT_LIST_HEAD(&bitmaps);

        /* Nothing in the space cache, goodbye */
        if (!i_size_read(inode))
                return 0;

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                return 0;
        else if (ret > 0) {
                btrfs_release_path(path);
                return 0;
        }

        ret = -1;

        leaf = path->nodes[0];
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        num_entries = btrfs_free_space_entries(leaf, header);
        num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
        generation = btrfs_free_space_generation(leaf, header);
        btrfs_release_path(path);

        if (BTRFS_I(inode)->generation != generation) {
                btrfs_err(root->fs_info,
                          "free space inode generation (%llu) "
                          "did not match free space cache generation (%llu)",
                          (unsigned long long)BTRFS_I(inode)->generation,
                          (unsigned long long)generation);
                return 0;
        }

        if (!num_entries)
                return 0;

        ret = io_ctl_init(&io_ctl, inode, root);
        if (ret)
                return ret;

        ret = readahead_cache(inode);
        if (ret)
                goto out;

        ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
        if (ret)
                goto out;

        ret = io_ctl_check_crc(&io_ctl, 0);
        if (ret)
                goto free_cache;

        ret = io_ctl_check_generation(&io_ctl, generation);
        if (ret)
                goto free_cache;

        while (num_entries) {
                e = kmem_cache_zalloc(btrfs_free_space_cachep,
                                      GFP_NOFS);
                if (!e)
                        goto free_cache;

                ret = io_ctl_read_entry(&io_ctl, e, &type);
                if (ret) {
                        kmem_cache_free(btrfs_free_space_cachep, e);
                        goto free_cache;
                }

                if (!e->bytes) {
                        kmem_cache_free(btrfs_free_space_cachep, e);
                        goto free_cache;
                }

                if (type == BTRFS_FREE_SPACE_EXTENT) {
                        spin_lock(&ctl->tree_lock);
                        ret = link_free_space(ctl, e);
                        spin_unlock(&ctl->tree_lock);
                        if (ret) {
                                btrfs_err(root->fs_info,
                                        "Duplicate entries in free space cache, dumping");
                                kmem_cache_free(btrfs_free_space_cachep, e);
                                goto free_cache;
                        }
                } else {
                        BUG_ON(!num_bitmaps);
                        num_bitmaps--;
                        e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
                        if (!e->bitmap) {
                                kmem_cache_free(
                                        btrfs_free_space_cachep, e);
                                goto free_cache;
                        }
                        spin_lock(&ctl->tree_lock);
                        ret = link_free_space(ctl, e);
                        ctl->total_bitmaps++;
                        ctl->op->recalc_thresholds(ctl);
                        spin_unlock(&ctl->tree_lock);
                        if (ret) {
                                btrfs_err(root->fs_info,
                                        "Duplicate entries in free space cache, dumping");
                                kmem_cache_free(btrfs_free_space_cachep, e);
                                goto free_cache;
                        }
                        list_add_tail(&e->list, &bitmaps);
                }

                num_entries--;
        }

        io_ctl_unmap_page(&io_ctl);

        /*
         * We add the bitmaps at the end of the entries in order that
         * the bitmap entries are added to the cache.
         */
        list_for_each_entry_safe(e, n, &bitmaps, list) {
                list_del_init(&e->list);
                ret = io_ctl_read_bitmap(&io_ctl, e);
                if (ret)
                        goto free_cache;
        }

        io_ctl_drop_pages(&io_ctl);
        merge_space_tree(ctl);
        ret = 1;
out:
        io_ctl_free(&io_ctl);
        return ret;
free_cache:
        io_ctl_drop_pages(&io_ctl);
        __btrfs_remove_free_space_cache(ctl);
        goto out;
}
int load_free_space_cache(struct btrfs_fs_info *fs_info,
                          struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_root *root = fs_info->tree_root;
        struct inode *inode;
        struct btrfs_path *path;
        int ret = 0;
        bool matched;
        u64 used = btrfs_block_group_used(&block_group->item);

        /*
         * If this block group has been marked to be cleared for one reason or
         * another then we can't trust the on disk cache, so just return.
         */
        spin_lock(&block_group->lock);
        if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
                spin_unlock(&block_group->lock);
                return 0;
        }
        spin_unlock(&block_group->lock);

        path = btrfs_alloc_path();
        if (!path)
                return 0;
        path->search_commit_root = 1;
        path->skip_locking = 1;

        inode = lookup_free_space_inode(root, block_group, path);
        if (IS_ERR(inode)) {
                btrfs_free_path(path);
                return 0;
        }

        /* We may have converted the inode and made the cache invalid. */
        spin_lock(&block_group->lock);
        if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
                spin_unlock(&block_group->lock);
                btrfs_free_path(path);
                goto out;
        }
        spin_unlock(&block_group->lock);

        ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
                                      path, block_group->key.objectid);
        btrfs_free_path(path);
        if (ret <= 0)
                goto out;

        spin_lock(&ctl->tree_lock);
        matched = (ctl->free_space == (block_group->key.offset - used -
                                       block_group->bytes_super));
        spin_unlock(&ctl->tree_lock);

        if (!matched) {
                __btrfs_remove_free_space_cache(ctl);
                btrfs_err(fs_info, "block group %llu has wrong amount of free space",
                          block_group->key.objectid);
                ret = -1;
        }
out:
        if (ret < 0) {
                /* This cache is bogus, make sure it gets cleared */
                spin_lock(&block_group->lock);
                block_group->disk_cache_state = BTRFS_DC_CLEAR;
                spin_unlock(&block_group->lock);
                ret = 0;

                btrfs_err(fs_info, "failed to load free space cache for block group %llu",
                          block_group->key.objectid);
        }

        iput(inode);
        return ret;
}
/*
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * and -1 if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                                   struct btrfs_free_space_ctl *ctl,
                                   struct btrfs_block_group_cache *block_group,
                                   struct btrfs_trans_handle *trans,
                                   struct btrfs_path *path, u64 offset)
{
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
        struct rb_node *node;
        struct list_head *pos, *n;
        struct extent_state *cached_state = NULL;
        struct btrfs_free_cluster *cluster = NULL;
        struct extent_io_tree *unpin = NULL;
        struct io_ctl io_ctl;
        struct list_head bitmap_list;
        struct btrfs_key key;
        u64 start, extent_start, extent_end, len;
        int entries = 0;
        int bitmaps = 0;
        int ret;
        int err = -1;

        INIT_LIST_HEAD(&bitmap_list);

        if (!i_size_read(inode))
                return -1;

        ret = io_ctl_init(&io_ctl, inode, root);
        if (ret)
                return -1;

        /* Get the cluster for this block_group if it exists */
        if (block_group && !list_empty(&block_group->cluster_list))
                cluster = list_entry(block_group->cluster_list.next,
                                     struct btrfs_free_cluster,
                                     block_group_list);

        /* Lock all pages first so we can lock the extent safely. */
        io_ctl_prepare_pages(&io_ctl, inode, 0);

        lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
                         0, &cached_state);

        node = rb_first(&ctl->free_space_offset);
        if (!node && cluster) {
                node = rb_first(&cluster->root);
                cluster = NULL;
        }

        /* Make sure we can fit our crcs into the first page */
        if (io_ctl.check_crcs &&
            (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
                goto out_nospc;

        io_ctl_set_generation(&io_ctl, trans->transid);

        /* Write out the extent entries */
        while (node) {
                struct btrfs_free_space *e;

                e = rb_entry(node, struct btrfs_free_space, offset_index);
                entries++;

                ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
                                       e->bitmap);
                if (ret)
                        goto out_nospc;

                if (e->bitmap) {
                        list_add_tail(&e->list, &bitmap_list);
                        bitmaps++;
                }
                node = rb_next(node);
                if (!node && cluster) {
                        node = rb_first(&cluster->root);
                        cluster = NULL;
                }
        }

        /*
         * We want to add any pinned extents to our free space cache
         * so we don't leak the space
         */

        /*
         * We shouldn't have switched the pinned extents yet so this is the
         * right one
         */
        unpin = root->fs_info->pinned_extents;

        if (block_group)
                start = block_group->key.objectid;

        while (block_group && (start < block_group->key.objectid +
                               block_group->key.offset)) {
                ret = find_first_extent_bit(unpin, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY, NULL);
                if (ret) {
                        ret = 0;
                        break;
                }

                /* This pinned extent is out of our range */
                if (extent_start >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                extent_start = max(extent_start, start);
                extent_end = min(block_group->key.objectid +
                                 block_group->key.offset, extent_end + 1);
                len = extent_end - extent_start;

                entries++;
                ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);
                if (ret)
                        goto out_nospc;

                start = extent_end;
        }

        /* Write out the bitmaps */
        list_for_each_safe(pos, n, &bitmap_list) {
                struct btrfs_free_space *entry =
                        list_entry(pos, struct btrfs_free_space, list);

                ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);
                if (ret)
                        goto out_nospc;
                list_del_init(&entry->list);
        }

        /* Zero out the rest of the pages just to make sure */
        io_ctl_zero_remaining_pages(&io_ctl);

        ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
                                0, i_size_read(inode), &cached_state);
        io_ctl_drop_pages(&io_ctl);
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
                             i_size_read(inode) - 1, &cached_state, GFP_NOFS);

        if (ret)
                goto out;

        btrfs_wait_ordered_range(inode, 0, (u64)-1);

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret < 0) {
                clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
                                 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
                                 GFP_NOFS);
                goto out;
        }
        leaf = path->nodes[0];
        if (ret > 0) {
                struct btrfs_key found_key;
                BUG_ON(!path->slots[0]);
                path->slots[0]--;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
                    found_key.offset != offset) {
                        clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
                                         inode->i_size - 1,
                                         EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
                                         NULL, GFP_NOFS);
                        btrfs_release_path(path);
                        goto out;
                }
        }

        BTRFS_I(inode)->generation = trans->transid;
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        btrfs_set_free_space_entries(leaf, header, entries);
        btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
        btrfs_set_free_space_generation(leaf, header, trans->transid);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        err = 0;
out:
        io_ctl_free(&io_ctl);
        if (err) {
                invalidate_inode_pages2(inode->i_mapping);
                BTRFS_I(inode)->generation = 0;
        }
        btrfs_update_inode(trans, root, inode);
        return err;

out_nospc:
        list_for_each_safe(pos, n, &bitmap_list) {
                struct btrfs_free_space *entry =
                        list_entry(pos, struct btrfs_free_space, list);
                list_del_init(&entry->list);
        }
        io_ctl_drop_pages(&io_ctl);
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
                             i_size_read(inode) - 1, &cached_state, GFP_NOFS);
        goto out;
}
int btrfs_write_out_cache(struct btrfs_root *root,
                          struct btrfs_trans_handle *trans,
                          struct btrfs_block_group_cache *block_group,
                          struct btrfs_path *path)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct inode *inode;
        int ret = 0;

        root = root->fs_info->tree_root;

        spin_lock(&block_group->lock);
        if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
                spin_unlock(&block_group->lock);
                return 0;
        }
        spin_unlock(&block_group->lock);

        inode = lookup_free_space_inode(root, block_group, path);
        if (IS_ERR(inode))
                return 0;

        ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
                                      path, block_group->key.objectid);
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->disk_cache_state = BTRFS_DC_ERROR;
                spin_unlock(&block_group->lock);
                ret = 0;

                btrfs_err(root->fs_info,
                          "failed to write free space cache for block group %llu",
                          block_group->key.objectid);
        }

        iput(inode);
        return ret;
}
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
                                          u64 offset)
{
        BUG_ON(offset < bitmap_start);
        offset -= bitmap_start;
        return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
        return (unsigned long)(div_u64(bytes, unit));
}
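/*
 * Worked example, assuming a 4K unit: for a bitmap starting at 1MB,
 * offset_to_bit(1MB, 4096, 1MB + 40KB) = 10, and
 * bytes_to_bits(64KB, 4096) = 16.
 */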
static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
                                   u64 offset)
{
        u64 bitmap_start;
        u64 bytes_per_bitmap;

        bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
        bitmap_start = offset - ctl->start;
        bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
        bitmap_start *= bytes_per_bitmap;
        bitmap_start += ctl->start;

        return bitmap_start;
}
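/*
 * For example, with a 4K unit each bitmap covers 32768 * 4K = 128MB,
 * so in a block group with ctl->start = 1GB an offset of 1GB + 200MB
 * maps to the bitmap starting at 1GB + 128MB.
 */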
static int tree_insert_offset(struct rb_root *root, u64 offset,
                              struct rb_node *node, int bitmap)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, offset_index);

                if (offset < info->offset) {
                        p = &(*p)->rb_left;
                } else if (offset > info->offset) {
                        p = &(*p)->rb_right;
                } else {
                        /*
                         * we could have a bitmap entry and an extent entry
                         * share the same offset.  If this is the case, we want
                         * the extent entry to always be found first if we do a
                         * linear search through the tree, since we want to have
                         * the quickest allocation time, and allocating from an
                         * extent is faster than allocating from a bitmap.  So
                         * if we're inserting a bitmap and we find an entry at
                         * this offset, we want to go right, or after this entry
                         * logically.  If we are inserting an extent and we've
                         * found a bitmap, we want to go left, or before
                         * logically.
                         */
                        if (bitmap) {
                                if (info->bitmap) {
                                        WARN_ON_ONCE(1);
                                        return -EEXIST;
                                }
                                p = &(*p)->rb_right;
                        } else {
                                if (!info->bitmap) {
                                        WARN_ON_ONCE(1);
                                        return -EEXIST;
                                }
                                p = &(*p)->rb_left;
                        }
                }
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}
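/*
 * Example of the ordering rule above: when an extent entry and a
 * bitmap entry share a start offset, the extent sorts logically first,
 * so a linear walk finds the cheaper extent allocation before the
 * bitmap.
 */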
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
                   u64 offset, int bitmap_only, int fuzzy)
{
        struct rb_node *n = ctl->free_space_offset.rb_node;
        struct btrfs_free_space *entry, *prev = NULL;

        /* find entry that is closest to the 'offset' */
        while (1) {
                if (!n) {
                        entry = NULL;
                        break;
                }

                entry = rb_entry(n, struct btrfs_free_space, offset_index);
                prev = entry;

                if (offset < entry->offset)
                        n = n->rb_left;
                else if (offset > entry->offset)
                        n = n->rb_right;
                else
                        break;
        }

        if (bitmap_only) {
                if (!entry)
                        return NULL;
                if (entry->bitmap)
                        return entry;

                /*
                 * bitmap entry and extent entry may share same offset,
                 * in that case, bitmap entry comes after extent entry.
                 */
                n = rb_next(n);
                if (!n)
                        return NULL;
                entry = rb_entry(n, struct btrfs_free_space, offset_index);
                if (entry->offset != offset)
                        return NULL;

                WARN_ON(!entry->bitmap);
                return entry;
        } else if (entry) {
                if (entry->bitmap) {
                        /*
                         * if previous extent entry covers the offset,
                         * we should return it instead of the bitmap entry
                         */
                        n = rb_prev(&entry->offset_index);
                        if (n) {
                                prev = rb_entry(n, struct btrfs_free_space,
                                                offset_index);
                                if (!prev->bitmap &&
                                    prev->offset + prev->bytes > offset)
                                        entry = prev;
                        }
                }
                return entry;
        }

        if (!prev)
                return NULL;

        /* find last entry before the 'offset' */
        entry = prev;
        if (entry->offset > offset) {
                n = rb_prev(&entry->offset_index);
                if (n) {
                        entry = rb_entry(n, struct btrfs_free_space,
                                         offset_index);
                        BUG_ON(entry->offset > offset);
                } else {
                        if (fuzzy)
                                return entry;
                        else
                                return NULL;
                }
        }

        if (entry->bitmap) {
                n = rb_prev(&entry->offset_index);
                if (n) {
                        prev = rb_entry(n, struct btrfs_free_space,
                                        offset_index);
                        if (!prev->bitmap &&
                            prev->offset + prev->bytes > offset)
                                return prev;
                }
                if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
                        return entry;
        } else if (entry->offset + entry->bytes > offset)
                return entry;

        if (!fuzzy)
                return NULL;

        while (1) {
                if (entry->bitmap) {
                        if (entry->offset + BITS_PER_BITMAP *
                            ctl->unit > offset)
                                break;
                } else {
                        if (entry->offset + entry->bytes > offset)
                                break;
                }

                n = rb_next(&entry->offset_index);
                if (!n)
                        return NULL;
                entry = rb_entry(n, struct btrfs_free_space, offset_index);
        }
        return entry;
}
static void __unlink_free_space(struct btrfs_free_space_ctl *ctl,
                                struct btrfs_free_space *info)
{
        rb_erase(&info->offset_index, &ctl->free_space_offset);
        ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info)
{
        __unlink_free_space(ctl, info);
        ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info)
{
        int ret = 0;

        BUG_ON(!info->bitmap && !info->bytes);
        ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
                                 &info->offset_index, (info->bitmap != NULL));
        if (ret)
                return ret;

        ctl->free_space += info->bytes;
        ctl->free_extents++;
        return ret;
}
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
        struct btrfs_block_group_cache *block_group = ctl->private;
        u64 max_bytes;
        u64 bitmap_bytes;
        u64 extent_bytes;
        u64 size = block_group->key.offset;
        u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
        int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

        max_bitmaps = max(max_bitmaps, 1);

        BUG_ON(ctl->total_bitmaps > max_bitmaps);

        /*
         * The goal is to keep the total amount of memory used per 1gb of space
         * at or below 32k, so we need to adjust how much memory we allow to be
         * used by extent based free space tracking
         */
        if (size < 1024 * 1024 * 1024)
                max_bytes = MAX_CACHE_BYTES_PER_GIG;
        else
                max_bytes = MAX_CACHE_BYTES_PER_GIG *
                        div64_u64(size, 1024 * 1024 * 1024);

        /*
         * we want to account for 1 more bitmap than what we have so we can make
         * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
         * we add more bitmaps.
         */
        bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

        if (bitmap_bytes >= max_bytes) {
                ctl->extents_thresh = 0;
                return;
        }

        /*
         * we want the extent entry threshold to always be at most 1/2 the max
         * bytes we can have, or whatever is less than that.
         */
        extent_bytes = max_bytes - bitmap_bytes;
        extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

        ctl->extents_thresh =
                div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
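/*
 * Worked example, assuming 4K pages and a 1GB block group: max_bytes
 * is 32K.  With one bitmap in use, bitmap_bytes = (1 + 1) * 4K = 8K,
 * so extent_bytes = min(32K - 8K, 32K / 2) = 16K and extents_thresh
 * becomes 16K / sizeof(struct btrfs_free_space).
 */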
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
                                       struct btrfs_free_space *info,
                                       u64 offset, u64 bytes)
{
        unsigned long start, count;

        start = offset_to_bit(info->offset, ctl->unit, offset);
        count = bytes_to_bits(bytes, ctl->unit);
        BUG_ON(start + count > BITS_PER_BITMAP);

        bitmap_clear(info->bitmap, start, count);

        info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info, u64 offset,
                              u64 bytes)
{
        __bitmap_clear_bits(ctl, info, offset, bytes);
        ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
                            struct btrfs_free_space *info, u64 offset,
                            u64 bytes)
{
        unsigned long start, count;

        start = offset_to_bit(info->offset, ctl->unit, offset);
        count = bytes_to_bits(bytes, ctl->unit);
        BUG_ON(start + count > BITS_PER_BITMAP);

        bitmap_set(info->bitmap, start, count);

        info->bytes += bytes;
        ctl->free_space += bytes;
}
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
                         struct btrfs_free_space *bitmap_info, u64 *offset,
                         u64 *bytes)
{
        unsigned long found_bits = 0;
        unsigned long bits, i;
        unsigned long next_zero;

        i = offset_to_bit(bitmap_info->offset, ctl->unit,
                          max_t(u64, *offset, bitmap_info->offset));
        bits = bytes_to_bits(*bytes, ctl->unit);

        for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
                next_zero = find_next_zero_bit(bitmap_info->bitmap,
                                               BITS_PER_BITMAP, i);
                if ((next_zero - i) >= bits) {
                        found_bits = next_zero - i;
                        break;
                }
                i = next_zero;
        }

        if (found_bits) {
                *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
                *bytes = (u64)(found_bits) * ctl->unit;
                return 0;
        }

        return -1;
}
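/*
 * Note that *offset and *bytes are updated in place: a caller asking
 * for 8K at offset X may get back *offset = X + 12K and *bytes = 24K
 * if that is the first long-enough run of set bits at or after X.
 */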
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
                unsigned long align)
{
        struct btrfs_free_space *entry;
        struct rb_node *node;
        u64 ctl_off;
        u64 tmp;
        u64 align_off;
        int ret;

        if (!ctl->free_space_offset.rb_node)
                return NULL;

        entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
        if (!entry)
                return NULL;

        for (node = &entry->offset_index; node; node = rb_next(node)) {
                entry = rb_entry(node, struct btrfs_free_space, offset_index);
                if (entry->bytes < *bytes)
                        continue;

                /* make sure the space returned is big enough
                 * to match our requested alignment
                 */
                if (*bytes >= align) {
                        ctl_off = entry->offset - ctl->start;
                        tmp = ctl_off + align - 1;
                        tmp = div64_u64(tmp, align);
                        tmp = tmp * align + ctl->start;
                        align_off = tmp - entry->offset;
                } else {
                        align_off = 0;
                        tmp = entry->offset;
                }

                if (entry->bytes < *bytes + align_off)
                        continue;

                if (entry->bitmap) {
                        ret = search_bitmap(ctl, entry, &tmp, bytes);
                        if (!ret) {
                                *offset = tmp;
                                return entry;
                        }
                        continue;
                }

                *offset = tmp;
                *bytes = entry->bytes - align_off;
                return entry;
        }

        return NULL;
}
static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info, u64 offset)
{
        info->offset = offset_to_bitmap(ctl, offset);
        info->bytes = 0;
        INIT_LIST_HEAD(&info->list);
        link_free_space(ctl, info);
        ctl->total_bitmaps++;

        ctl->op->recalc_thresholds(ctl);
}
static void free_bitmap(struct btrfs_free_space_ctl *ctl,
                        struct btrfs_free_space *bitmap_info)
{
        unlink_free_space(ctl, bitmap_info);
        kfree(bitmap_info->bitmap);
        kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
        ctl->total_bitmaps--;
        ctl->op->recalc_thresholds(ctl);
}
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
                                       struct btrfs_free_space *bitmap_info,
                                       u64 *offset, u64 *bytes)
{
        u64 end;
        u64 search_start, search_bytes;
        int ret;

again:
        end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

        /*
         * We need to search for bits in this bitmap.  We could only cover some
         * of the extent in this bitmap thanks to how we add space, so we need
         * to search for as much as it as we can and clear that amount, and then
         * go searching for the next bit.
         */
        search_start = *offset;
        search_bytes = ctl->unit;
        search_bytes = min(search_bytes, end - search_start + 1);
        ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
        if (ret < 0 || search_start != *offset)
                return -EINVAL;

        /* We may have found more bits than what we need */
        search_bytes = min(search_bytes, *bytes);

        /* Cannot clear past the end of the bitmap */
        search_bytes = min(search_bytes, end - search_start + 1);

        bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
        *offset += search_bytes;
        *bytes -= search_bytes;

        if (*bytes) {
                struct rb_node *next = rb_next(&bitmap_info->offset_index);
                if (!bitmap_info->bytes)
                        free_bitmap(ctl, bitmap_info);

                /*
                 * no entry after this bitmap, but we still have bytes to
                 * remove, so something has gone wrong.
                 */
                if (!next)
                        return -EINVAL;

                bitmap_info = rb_entry(next, struct btrfs_free_space,
                                       offset_index);

                /*
                 * if the next entry isn't a bitmap we need to return to let the
                 * extent stuff do its work.
                 */
                if (!bitmap_info->bitmap)
                        return -EAGAIN;

                /*
                 * Ok the next item is a bitmap, but it may not actually hold
                 * the information for the rest of this free space stuff, so
                 * look for it, and if we don't find it return so we can try
                 * everything over again.
                 */
                search_start = *offset;
                search_bytes = ctl->unit;
                ret = search_bitmap(ctl, bitmap_info, &search_start,
                                    &search_bytes);
                if (ret < 0 || search_start != *offset)
                        return -EAGAIN;

                goto again;
        } else if (!bitmap_info->bytes)
                free_bitmap(ctl, bitmap_info);

        return 0;
}
static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
                               struct btrfs_free_space *info, u64 offset,
                               u64 bytes)
{
        u64 bytes_to_set = 0;
        u64 end;

        end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

        bytes_to_set = min(end - offset, bytes);

        bitmap_set_bits(ctl, info, offset, bytes_to_set);

        return bytes_to_set;
}
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
                       struct btrfs_free_space *info)
{
        struct btrfs_block_group_cache *block_group = ctl->private;

        /*
         * If we are below the extents threshold then we can add this as an
         * extent, and don't have to deal with the bitmap
         */
        if (ctl->free_extents < ctl->extents_thresh) {
                /*
                 * If this block group has some small extents we don't want to
                 * use up all of our free slots in the cache with them, we want
                 * to reserve them to larger extents, however if we have plenty
                 * of cache left then go ahead and add them, no sense in adding
                 * the overhead of a bitmap if we don't have to.
                 */
                if (info->bytes <= block_group->sectorsize * 4) {
                        if (ctl->free_extents * 2 <= ctl->extents_thresh)
                                return false;
                } else {
                        return false;
                }
        }

        /*
         * The original block groups from mkfs can be really small, like 8
         * megabytes, so don't bother with a bitmap for those entries.  However
         * some block groups can be smaller than what a bitmap would cover but
         * are still large enough that they could overflow the 32k memory limit,
         * so allow those block groups to still be allowed to have a bitmap
         * entry.
         */
        if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
                return false;

        return true;
}
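/*
 * Concrete example, assuming a 4K sectorsize: a 12K chunk (under the
 * 16K small-extent cutoff) stays an extent entry while less than half
 * the extent slots are in use; once free_extents reaches
 * extents_thresh, space is steered into bitmaps instead, unless the
 * block group is smaller than half of what one bitmap covers.
 */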
static struct btrfs_free_space_op free_space_op = {
        .recalc_thresholds	= recalculate_thresholds,
        .use_bitmap		= use_bitmap,
};
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info)
{
        struct btrfs_free_space *bitmap_info;
        struct btrfs_block_group_cache *block_group = NULL;
        int added = 0;
        u64 bytes, offset, bytes_added;
        int ret;

        bytes = info->bytes;
        offset = info->offset;

        if (!ctl->op->use_bitmap(ctl, info))
                return 0;

        if (ctl->op == &free_space_op)
                block_group = ctl->private;
again:
        /*
         * Since we link bitmaps right into the cluster we need to see if we
         * have a cluster here, and if so and it has our bitmap we need to add
         * the free space to that bitmap.
         */
        if (block_group && !list_empty(&block_group->cluster_list)) {
                struct btrfs_free_cluster *cluster;
                struct rb_node *node;
                struct btrfs_free_space *entry;

                cluster = list_entry(block_group->cluster_list.next,
                                     struct btrfs_free_cluster,
                                     block_group_list);
                spin_lock(&cluster->lock);
                node = rb_first(&cluster->root);
                if (!node) {
                        spin_unlock(&cluster->lock);
                        goto no_cluster_bitmap;
                }

                entry = rb_entry(node, struct btrfs_free_space, offset_index);
                if (!entry->bitmap) {
                        spin_unlock(&cluster->lock);
                        goto no_cluster_bitmap;
                }

                if (entry->offset == offset_to_bitmap(ctl, offset)) {
                        bytes_added = add_bytes_to_bitmap(ctl, entry,
                                                          offset, bytes);
                        bytes -= bytes_added;
                        offset += bytes_added;
                }
                spin_unlock(&cluster->lock);
                if (!bytes) {
                        ret = 1;
                        goto out;
                }
        }

no_cluster_bitmap:
        bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
                                         1, 0);
        if (!bitmap_info) {
                BUG_ON(added);
                goto new_bitmap;
        }

        bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
        bytes -= bytes_added;
        offset += bytes_added;
        added = 0;

        if (!bytes) {
                ret = 1;
                goto out;
        }
        goto again;

new_bitmap:
        if (info && info->bitmap) {
                add_new_bitmap(ctl, info, offset);
                added = 1;
                info = NULL;
                goto again;
        } else {
                spin_unlock(&ctl->tree_lock);

                /* no pre-allocated info, allocate a new one */
                if (!info) {
                        info = kmem_cache_zalloc(btrfs_free_space_cachep,
                                                 GFP_NOFS);
                        if (!info) {
                                spin_lock(&ctl->tree_lock);
                                ret = -ENOMEM;
                                goto out;
                        }
                }

                /* allocate the bitmap */
                info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
                spin_lock(&ctl->tree_lock);
                if (!info->bitmap) {
                        ret = -ENOMEM;
                        goto out;
                }
                goto again;
        }

out:
        if (info) {
                if (info->bitmap)
                        kfree(info->bitmap);
                kmem_cache_free(btrfs_free_space_cachep, info);
        }

        return ret;
}
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
                                 struct btrfs_free_space *info, bool update_stat)
{
        struct btrfs_free_space *left_info;
        struct btrfs_free_space *right_info;
        bool merged = false;
        u64 offset = info->offset;
        u64 bytes = info->bytes;

        /*
         * first we want to see if there is free space adjacent to the range we
         * are adding, if there is remove that struct and add a new one to
         * cover the entire range
         */
        right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
        if (right_info && rb_prev(&right_info->offset_index))
                left_info = rb_entry(rb_prev(&right_info->offset_index),
                                     struct btrfs_free_space, offset_index);
        else
                left_info = tree_search_offset(ctl, offset - 1, 0, 0);

        if (right_info && !right_info->bitmap) {
                if (update_stat)
                        unlink_free_space(ctl, right_info);
                else
                        __unlink_free_space(ctl, right_info);
                info->bytes += right_info->bytes;
                kmem_cache_free(btrfs_free_space_cachep, right_info);
                merged = true;
        }

        if (left_info && !left_info->bitmap &&
            left_info->offset + left_info->bytes == offset) {
                if (update_stat)
                        unlink_free_space(ctl, left_info);
                else
                        __unlink_free_space(ctl, left_info);
                info->offset = left_info->offset;
                info->bytes += left_info->bytes;
                kmem_cache_free(btrfs_free_space_cachep, left_info);
                merged = true;
        }

        return merged;
}
int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
                           u64 offset, u64 bytes)
{
        struct btrfs_free_space *info;
        int ret = 0;

        info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
        if (!info)
                return -ENOMEM;

        info->offset = offset;
        info->bytes = bytes;

        spin_lock(&ctl->tree_lock);

        if (try_merge_free_space(ctl, info, true))
                goto link;

        /*
         * There was no extent directly to the left or right of this new
         * extent then we know we're going to have to allocate a new extent, so
         * before we do that see if we need to drop this into a bitmap
         */
        ret = insert_into_bitmap(ctl, info);
        if (ret < 0) {
                goto out;
        } else if (ret) {
                ret = 0;
                goto out;
        }
link:
        ret = link_free_space(ctl, info);
        if (ret)
                kmem_cache_free(btrfs_free_space_cachep, info);
out:
        spin_unlock(&ctl->tree_lock);

        if (ret) {
                printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
                BUG_ON(ret == -EEXIST);
        }

        return ret;
}
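/*
 * Taken together, the add path is: try to merge the new range into an
 * adjacent extent entry; failing that, try to absorb it into an
 * existing or new bitmap; only then link a fresh extent entry.  For
 * example, freeing [8K, 12K) next to an existing [4K, 8K) entry just
 * extends that entry to [4K, 12K).
 */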
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                            u64 offset, u64 bytes)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *info;
        int ret;
        bool re_search = false;

        spin_lock(&ctl->tree_lock);

again:
        ret = 0;
        if (!bytes)
                goto out_lock;

        info = tree_search_offset(ctl, offset, 0, 0);
        if (!info) {
                /*
                 * oops didn't find an extent that matched the space we wanted
                 * to remove, look for a bitmap instead
                 */
                info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
                                          1, 0);
                if (!info) {
                        /*
                         * If we found a partial bit of our free space in a
                         * bitmap but then couldn't find the other part this may
                         * be a problem, so WARN about it.
                         */
                        WARN_ON(re_search);
                        goto out_lock;
                }
        }

        re_search = false;
        if (!info->bitmap) {
                unlink_free_space(ctl, info);
                if (offset == info->offset) {
                        u64 to_free = min(bytes, info->bytes);

                        info->bytes -= to_free;
                        info->offset += to_free;
                        if (info->bytes) {
                                ret = link_free_space(ctl, info);
                                WARN_ON(ret);
                        } else {
                                kmem_cache_free(btrfs_free_space_cachep, info);
                        }

                        offset += to_free;
                        bytes -= to_free;
                        goto again;
                } else {
                        u64 old_end = info->bytes + info->offset;

                        info->bytes = offset - info->offset;
                        ret = link_free_space(ctl, info);
                        WARN_ON(ret);
                        if (ret)
                                goto out_lock;

                        /* Not enough bytes in this entry to satisfy us */
                        if (old_end < offset + bytes) {
                                bytes -= old_end - offset;
                                offset = old_end;
                                goto again;
                        } else if (old_end == offset + bytes) {
                                /* all done */
                                goto out_lock;
                        }
                        spin_unlock(&ctl->tree_lock);

                        ret = btrfs_add_free_space(block_group, offset + bytes,
                                                   old_end - (offset + bytes));
                        WARN_ON(ret);
                        goto out;
                }
        }

        ret = remove_from_bitmap(ctl, info, &offset, &bytes);
        if (ret == -EAGAIN) {
                re_search = true;
                goto again;
        }
out_lock:
        spin_unlock(&ctl->tree_lock);
out:
        return ret;
}
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                           u64 bytes)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *info;
        struct rb_node *n;
        int count = 0;

        for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                if (info->bytes >= bytes && !block_group->ro)
                        count++;
                printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
                       (unsigned long long)info->offset,
                       (unsigned long long)info->bytes,
                       (info->bitmap) ? "yes" : "no");
        }
        printk(KERN_INFO "block group has cluster?: %s\n",
               list_empty(&block_group->cluster_list) ? "no" : "yes");
        printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
               "\n", count);
}
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

        spin_lock_init(&ctl->tree_lock);
        ctl->unit = block_group->sectorsize;
        ctl->start = block_group->key.objectid;
        ctl->private = block_group;
        ctl->op = &free_space_op;

        /*
         * we only want to have 32k of ram per block group for keeping
         * track of free space, and if we pass 1/2 of that we want to
         * start converting things over to using bitmaps
         */
        ctl->extents_thresh = ((1024 * 32) / 2) /
                                sizeof(struct btrfs_free_space);
}
/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
                             struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry;
        struct rb_node *node;

        spin_lock(&cluster->lock);
        if (cluster->block_group != block_group)
                goto out;

        cluster->block_group = NULL;
        cluster->window_start = 0;
        list_del_init(&cluster->block_group_list);

        node = rb_first(&cluster->root);
        while (node) {
                bool bitmap;

                entry = rb_entry(node, struct btrfs_free_space, offset_index);
                node = rb_next(&entry->offset_index);
                rb_erase(&entry->offset_index, &cluster->root);

                bitmap = (entry->bitmap != NULL);
                if (!bitmap)
                        try_merge_free_space(ctl, entry, false);
                tree_insert_offset(&ctl->free_space_offset,
                                   entry->offset, &entry->offset_index, bitmap);
        }
        cluster->root = RB_ROOT;

out:
        spin_unlock(&cluster->lock);
        btrfs_put_block_group(block_group);
        return 0;
}
static void __btrfs_remove_free_space_cache_locked(
                                struct btrfs_free_space_ctl *ctl)
{
        struct btrfs_free_space *info;
        struct rb_node *node;

        while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
                info = rb_entry(node, struct btrfs_free_space, offset_index);
                if (!info->bitmap) {
                        unlink_free_space(ctl, info);
                        kmem_cache_free(btrfs_free_space_cachep, info);
                } else {
                        free_bitmap(ctl, info);
                }
                if (need_resched()) {
                        spin_unlock(&ctl->tree_lock);
                        cond_resched();
                        spin_lock(&ctl->tree_lock);
                }
        }
}

void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
        spin_lock(&ctl->tree_lock);
        __btrfs_remove_free_space_cache_locked(ctl);
        spin_unlock(&ctl->tree_lock);
}
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_cluster *cluster;
        struct list_head *head;

        spin_lock(&ctl->tree_lock);
        while ((head = block_group->cluster_list.next) !=
               &block_group->cluster_list) {
                cluster = list_entry(head, struct btrfs_free_cluster,
                                     block_group_list);

                WARN_ON(cluster->block_group != block_group);
                __btrfs_return_cluster_to_free_space(block_group, cluster);
                if (need_resched()) {
                        spin_unlock(&ctl->tree_lock);
                        cond_resched();
                        spin_lock(&ctl->tree_lock);
                }
        }
        __btrfs_remove_free_space_cache_locked(ctl);
        spin_unlock(&ctl->tree_lock);
}
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                               u64 offset, u64 bytes, u64 empty_size)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry = NULL;
        u64 bytes_search = bytes + empty_size;
        u64 ret = 0;
        u64 align_gap = 0;
        u64 align_gap_len = 0;

        spin_lock(&ctl->tree_lock);
        entry = find_free_space(ctl, &offset, &bytes_search,
                                block_group->full_stripe_len);
        if (!entry)
                goto out;

        ret = offset;
        if (entry->bitmap) {
                bitmap_clear_bits(ctl, entry, offset, bytes);
                if (!entry->bytes)
                        free_bitmap(ctl, entry);
        } else {
                unlink_free_space(ctl, entry);
                align_gap_len = offset - entry->offset;
                align_gap = entry->offset;

                entry->offset = offset + bytes;
                WARN_ON(entry->bytes < bytes + align_gap_len);

                entry->bytes -= bytes + align_gap_len;
                if (!entry->bytes)
                        kmem_cache_free(btrfs_free_space_cachep, entry);
                else
                        link_free_space(ctl, entry);
        }
out:
        spin_unlock(&ctl->tree_lock);

        if (align_gap_len)
                __btrfs_add_free_space(ctl, align_gap, align_gap_len);
        return ret;
}
/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
                               struct btrfs_block_group_cache *block_group,
                               struct btrfs_free_cluster *cluster)
{
        struct btrfs_free_space_ctl *ctl;
        int ret;

        /* first, get a safe pointer to the block group */
        spin_lock(&cluster->lock);
        if (!block_group) {
                block_group = cluster->block_group;
                if (!block_group) {
                        spin_unlock(&cluster->lock);
                        return 0;
                }
        } else if (cluster->block_group != block_group) {
                /* someone else has already freed it don't redo their work */
                spin_unlock(&cluster->lock);
                return 0;
        }
        atomic_inc(&block_group->count);
        spin_unlock(&cluster->lock);

        ctl = block_group->free_space_ctl;

        /* now return any extents the cluster had on it */
        spin_lock(&ctl->tree_lock);
        ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&ctl->tree_lock);

        /* finally drop our ref */
        btrfs_put_block_group(block_group);
        return ret;
}
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
                                   struct btrfs_free_cluster *cluster,
                                   struct btrfs_free_space *entry,
                                   u64 bytes, u64 min_start)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        int err;
        u64 search_start = cluster->window_start;
        u64 search_bytes = bytes;
        u64 ret = 0;

        search_start = min_start;
        search_bytes = bytes;

        err = search_bitmap(ctl, entry, &search_start, &search_bytes);
        if (err)
                return 0;

        ret = search_start;
        __bitmap_clear_bits(ctl, entry, ret, bytes);

        return ret;
}
/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster, u64 bytes,
                             u64 min_start)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry = NULL;
        struct rb_node *node;
        u64 ret = 0;

        spin_lock(&cluster->lock);
        if (bytes > cluster->max_size)
                goto out;

        if (cluster->block_group != block_group)
                goto out;

        node = rb_first(&cluster->root);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_free_space, offset_index);
        while (1) {
                if (entry->bytes < bytes ||
                    (!entry->bitmap && entry->offset < min_start)) {
                        node = rb_next(&entry->offset_index);
                        if (!node)
                                break;
                        entry = rb_entry(node, struct btrfs_free_space,
                                         offset_index);
                        continue;
                }

                if (entry->bitmap) {
                        ret = btrfs_alloc_from_bitmap(block_group,
                                                      cluster, entry, bytes,
                                                      cluster->window_start);
                        if (ret == 0) {
                                node = rb_next(&entry->offset_index);
                                if (!node)
                                        break;
                                entry = rb_entry(node, struct btrfs_free_space,
                                                 offset_index);
                                continue;
                        }
                        cluster->window_start += bytes;
                } else {
                        ret = entry->offset;

                        entry->offset += bytes;
                        entry->bytes -= bytes;
                }

                if (entry->bytes == 0)
                        rb_erase(&entry->offset_index, &cluster->root);
                break;
        }
out:
        spin_unlock(&cluster->lock);

        if (!ret)
                return 0;

        spin_lock(&ctl->tree_lock);

        ctl->free_space -= bytes;
        if (entry->bytes == 0) {
                ctl->free_extents--;
                if (entry->bitmap) {
                        kfree(entry->bitmap);
                        ctl->total_bitmaps--;
                        ctl->op->recalc_thresholds(ctl);
                }
                kmem_cache_free(btrfs_free_space_cachep, entry);
        }

        spin_unlock(&ctl->tree_lock);

        return ret;
}
static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
                                struct btrfs_free_space *entry,
                                struct btrfs_free_cluster *cluster,
                                u64 offset, u64 bytes,
                                u64 cont1_bytes, u64 min_bytes)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        unsigned long next_zero;
        unsigned long i;
        unsigned long want_bits;
        unsigned long min_bits;
        unsigned long found_bits;
        unsigned long start = 0;
        unsigned long total_found = 0;
        int ret;

        i = offset_to_bit(entry->offset, ctl->unit,
                          max_t(u64, offset, entry->offset));
        want_bits = bytes_to_bits(bytes, ctl->unit);
        min_bits = bytes_to_bits(min_bytes, ctl->unit);

again:
        found_bits = 0;
        for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
                next_zero = find_next_zero_bit(entry->bitmap,
                                               BITS_PER_BITMAP, i);
                if (next_zero - i >= min_bits) {
                        found_bits = next_zero - i;
                        break;
                }
                i = next_zero;
        }

        if (!found_bits)
                return -ENOSPC;

        if (!total_found) {
                start = i;
                cluster->max_size = 0;
        }

        total_found += found_bits;

        if (cluster->max_size < found_bits * ctl->unit)
                cluster->max_size = found_bits * ctl->unit;

        if (total_found < want_bits || cluster->max_size < cont1_bytes) {
                i = next_zero + 1;
                goto again;
        }

        cluster->window_start = start * ctl->unit + entry->offset;
        rb_erase(&entry->offset_index, &ctl->free_space_offset);
        ret = tree_insert_offset(&cluster->root, entry->offset,
                                 &entry->offset_index, 1);
        BUG_ON(ret); /* -EEXIST; Logic error */

        trace_btrfs_setup_cluster(block_group, cluster,
                                  total_found * ctl->unit, 1);
        return 0;
}
/*
 * This searches the block group for just extents to fill the cluster with.
 * Try to find a cluster with at least bytes total bytes, at least one
 * extent of cont1_bytes, and other extents of at least min_bytes.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_start;
	u64 window_free;
	u64 max_extent;
	u64 total_size = 0;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap || entry->bytes < min_bytes) {
		if (entry->bitmap && list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_start = entry->offset;
	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;

	for (node = rb_next(&entry->offset_index); node;
	     node = rb_next(&entry->offset_index)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		if (entry->bytes < min_bytes)
			continue;

		last = entry;
		window_free += entry->bytes;
		if (entry->bytes > max_extent)
			max_extent = entry->bytes;
	}

	if (window_free < bytes || max_extent < cont1_bytes)
		return -ENOSPC;

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap || entry->bytes < min_bytes)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		total_size += entry->bytes;
		BUG_ON(ret); /* -EEXIST; Logic error */
	} while (node && entry != last);

	cluster->max_size = max_extent;
	trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
	return 0;
}
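
/*
 * Worked example for the window accounting above (assumed numbers, for
 * illustration): with min_bytes == 64K and extent entries of 1MiB, 2MiB and
 * 512K inside the window, window_free ends up at 3.5MiB and max_extent at
 * 2MiB, so the setup succeeds only if the caller asked for at most 3.5MiB
 * in total (bytes) with a largest-single-extent requirement (cont1_bytes)
 * of at most 2MiB.
 */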
/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = -ENOSPC;
	u64 bitmap_offset = offset_to_bitmap(ctl, offset);

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * The bitmap that covers offset won't be in the list unless offset
	 * is just its start offset.
	 */
	entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
	if (entry->offset != bitmap_offset) {
		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
		if (entry && list_empty(&entry->list))
			list_add(&entry->list, bitmaps);
	}

	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, cont1_bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * The bitmaps list has all the bitmaps that record free space
	 * starting after offset, so no more search is required.
	 */
	return -ENOSPC;
}
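
/*
 * Note on offset_to_bitmap() as used above (illustration, assuming 4K pages
 * and ctl->start == 0): bitmap entries are aligned to BITS_PER_BITMAP *
 * ctl->unit boundaries, so for a block group with unit == 4096 an offset of
 * 130MiB rounds down to bitmap_offset == 128MiB.  That covering bitmap is
 * only already on the bitmaps list if its start equals offset, hence the
 * explicit tree_search_offset() fixup above.
 */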
/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes+empty_size.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry, *tmp;
	LIST_HEAD(bitmaps);
	u64 min_bytes;
	u64 cont1_bytes;
	int ret;

	/*
	 * Choose the minimum extent size we'll require for this
	 * cluster.  For SSD_SPREAD, don't allow any fragmentation.
	 * For metadata, allow allocations with smaller extents.  For
	 * data, keep it dense.
	 */
	if (btrfs_test_opt(root, SSD_SPREAD)) {
		cont1_bytes = min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		cont1_bytes = bytes;
		min_bytes = block_group->sectorsize;
	} else {
		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
		min_bytes = block_group->sectorsize;
	}

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
				 min_bytes);

	INIT_LIST_HEAD(&bitmaps);
	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes + empty_size,
				      cont1_bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes + empty_size,
					   cont1_bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	} else {
		trace_btrfs_failed_cluster_setup(block_group);
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}
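
/*
 * Sketch of how a caller is expected to drive this (hedged; the real logic
 * lives in the extent allocator in extent-tree.c and takes more care with
 * retries and empty_cluster sizing):
 *
 *	spin_lock(&cluster->refill_lock);
 *	ret = btrfs_find_space_cluster(trans, root, block_group, cluster,
 *				       search_start, num_bytes, empty_size);
 *	if (!ret)
 *		offset = btrfs_alloc_from_cluster(block_group, cluster,
 *						  num_bytes, search_start);
 *	spin_unlock(&cluster->refill_lock);
 */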
/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}
static int do_trimming(struct btrfs_block_group_cache *block_group,
		       u64 *total_trimmed, u64 start, u64 bytes,
		       u64 reserved_start, u64 reserved_bytes)
{
	struct btrfs_space_info *space_info = block_group->space_info;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	int ret;
	int update = 0;
	u64 trimmed = 0;

	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	if (!block_group->ro) {
		block_group->reserved += reserved_bytes;
		space_info->bytes_reserved += reserved_bytes;
		update = 1;
	}
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = btrfs_error_discard_extent(fs_info->extent_root,
					 start, bytes, &trimmed);
	if (!ret)
		*total_trimmed += trimmed;

	btrfs_add_free_space(block_group, reserved_start, reserved_bytes);

	if (update) {
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);
		if (block_group->ro)
			space_info->bytes_readonly += reserved_bytes;
		block_group->reserved -= reserved_bytes;
		space_info->bytes_reserved -= reserved_bytes;
		spin_unlock(&space_info->lock);
		spin_unlock(&block_group->lock);
	}

	return ret;
}
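
/*
 * The reserved-bytes dance above deserves a note: the caller has already
 * pulled [reserved_start, reserved_start + reserved_bytes) out of the free
 * space ctl, and do_trimming() additionally accounts it as reserved so the
 * allocator's counters stay consistent while the discard is in flight; once
 * the discard finishes, the range is handed back via btrfs_add_free_space()
 * and the temporary reservation is dropped.
 */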
static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
			  u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = 0;
	u64 extent_start;
	u64 extent_bytes;
	u64 bytes;

	while (start < end) {
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		/* skip bitmaps */
		while (entry->bitmap) {
			node = rb_next(&entry->offset_index);
			if (!node) {
				spin_unlock(&ctl->tree_lock);
				goto out;
			}
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
		}

		if (entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		extent_start = entry->offset;
		extent_bytes = entry->bytes;
		start = max(start, extent_start);
		bytes = min(extent_start + extent_bytes, end) - start;
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			goto next;
		}

		unlink_free_space(ctl, entry);
		kmem_cache_free(btrfs_free_space_cachep, entry);

		spin_unlock(&ctl->tree_lock);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  extent_start, extent_bytes);
		if (ret)
			break;
next:
		start += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}
out:
	return ret;
}
static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
			u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = 0;
	int ret2;
	u64 bytes;
	u64 offset = offset_to_bitmap(ctl, start);

	while (offset < end) {
		bool next_bitmap = false;

		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, offset, 1, 0);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			next_bitmap = true;
			goto next;
		}

		bytes = minlen;
		ret2 = search_bitmap(ctl, entry, &start, &bytes);
		if (ret2 || start >= end) {
			spin_unlock(&ctl->tree_lock);
			next_bitmap = true;
			goto next;
		}

		bytes = min(bytes, end - start);
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			goto next;
		}

		bitmap_clear_bits(ctl, entry, start, bytes);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);

		spin_unlock(&ctl->tree_lock);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  start, bytes);
		if (ret)
			break;
next:
		if (next_bitmap) {
			offset += BITS_PER_BITMAP * ctl->unit;
		} else {
			start += bytes;
			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
				offset += BITS_PER_BITMAP * ctl->unit;
		}

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	int ret;

	*trimmed = 0;

	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
	if (ret)
		return ret;

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);

	return ret;
}
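
/*
 * Rough usage sketch (hedged; the real iteration lives in btrfs_trim_fs() in
 * extent-tree.c, which the FITRIM ioctl invokes): walk the block groups that
 * overlap the requested range and accumulate *trimmed across them, e.g.
 *
 *	ret = btrfs_trim_block_group(cache, &group_trimmed, start, end,
 *				     range->minlen);
 *	trimmed += group_trimmed;
 */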
/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count);
		/* Logic error; Should be empty if it can't find anything */
		BUG_ON(ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}
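
/*
 * Note that fs_root->free_ino_ctl reuses the free space code with
 * ctl->unit == 1, so "offset" and "bytes" here count inode numbers rather
 * than disk bytes: consuming one number shrinks an extent entry by one, or
 * clears a single bit in a bitmap entry.
 */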
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->cache_lock);
	if (root->cache_inode)
		inode = igrab(root->cache_inode);
	spin_unlock(&root->cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->cache_inode = igrab(inode);
	spin_unlock(&root->cache_lock);

	return inode;
}
int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}
int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on
	 * the normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);
	if (ret < 0)
		btrfs_err(fs_info,
			"failed to load free ino cache for root %llu",
			root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}
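
/*
 * A caller sketch for the function above (hedged; the actual call site is
 * the inode-number caching setup in inode-map.c): the on-disk cache is only
 * trusted when its stored generation matches the root's, otherwise the
 * caching thread falls back to scanning the fs tree for free inode numbers.
 */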
int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct inode *inode;
	int ret;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
	if (ret) {
		btrfs_delalloc_release_metadata(inode, inode->i_size);
		btrfs_err(root->fs_info,
			"failed to write free ino cache for root %llu",
			root->root_key.objectid);
	}

	iput(inode);
	return ret;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
static struct btrfs_block_group_cache *init_test_block_group(void)
{
	struct btrfs_block_group_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return NULL;
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}

	cache->key.objectid = 0;
	cache->key.offset = 1024 * 1024 * 1024;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->sectorsize = 4096;

	spin_lock_init(&cache->lock);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->new_bg_list);

	btrfs_init_free_space_ctl(cache);

	return cache;
}
/*
 * Checks to see if the given range is in the free space cache.  This is
 * really just used to check the absence of space, so if there is free space
 * in the range at all we will return 1.
 */
static int check_exists(struct btrfs_block_group_cache *cache, u64 offset,
			u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info;
	int ret = 0;

	spin_lock(&ctl->tree_lock);
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info)
			goto out;
	}

have_info:
	if (info->bitmap) {
		u64 bit_off, bit_bytes;
		struct rb_node *n;
		struct btrfs_free_space *tmp;

		bit_off = offset;
		bit_bytes = ctl->unit;
		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes);
		if (!ret) {
			if (bit_off == offset) {
				ret = 1;
				goto out;
			} else if (bit_off > offset &&
				   offset + bytes > bit_off) {
				ret = 1;
				goto out;
			}
		}

		n = rb_prev(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (tmp->offset + tmp->bytes < offset)
				break;
			if (offset + bytes < tmp->offset) {
				n = rb_prev(&info->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		n = rb_next(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (offset + bytes < tmp->offset)
				break;
			if (tmp->offset + tmp->bytes < offset) {
				n = rb_next(&info->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		goto out;
	}

	if (info->offset == offset) {
		ret = 1;
		goto out;
	}

	if (offset > info->offset && offset < info->offset + info->bytes)
		ret = 1;
out:
	spin_unlock(&ctl->tree_lock);
	return ret;
}
/*
 * Use this if you need to make a bitmap or extent entry specifically, it
 * doesn't do any of the merging that add_free_space does, this acts a lot
 * like how the free space cache loading stuff works, so you can get really
 * weird configurations.
 */
static int add_free_space_entry(struct btrfs_block_group_cache *cache,
				u64 offset, u64 bytes, bool bitmap)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info = NULL, *bitmap_info;
	void *map = NULL;
	u64 bytes_added;
	int ret;

again:
	if (!info) {
		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
		if (!info)
			return -ENOMEM;
	}

	if (!bitmap) {
		spin_lock(&ctl->tree_lock);
		info->offset = offset;
		info->bytes = bytes;
		ret = link_free_space(ctl, info);
		spin_unlock(&ctl->tree_lock);
		if (ret)
			kmem_cache_free(btrfs_free_space_cachep, info);
		return ret;
	}

	if (!map) {
		map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		if (!map) {
			kmem_cache_free(btrfs_free_space_cachep, info);
			return -ENOMEM;
		}
	}

	spin_lock(&ctl->tree_lock);
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		info->bitmap = map;
		map = NULL;
		add_new_bitmap(ctl, info, offset);
		bitmap_info = info;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	spin_unlock(&ctl->tree_lock);

	if (bytes)
		goto again;

	return 0;
}
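
/*
 * Example of what this helper is for (taken from the tests below):
 * add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1) forces a bitmap entry
 * covering [0, 4MiB) even where btrfs_add_free_space() might have created
 * or merged an extent entry, which is exactly the kind of mixed layout the
 * bitmap-and-extent tests need to provoke.
 */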
/*
 * This test just does basic sanity checking, making sure we can add an
 * extent entry and remove space from either end and the middle, and make
 * sure we can remove space that covers adjacent extent entries.
 */
static int test_extents(struct btrfs_block_group_cache *cache)
{
	int ret = 0;

	printk(KERN_ERR "Running extent only tests\n");

	/* First just make sure we can remove an entire entry */
	ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024);
	if (ret) {
		printk(KERN_ERR "Error adding initial extents %d\n", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 0, 4 * 1024 * 1024);
	if (ret) {
		printk(KERN_ERR "Error removing extent %d\n", ret);
		return ret;
	}

	if (check_exists(cache, 0, 4 * 1024 * 1024)) {
		printk(KERN_ERR "Full remove left some lingering space\n");
		return -1;
	}

	/* Ok edge and middle cases now */
	ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024);
	if (ret) {
		printk(KERN_ERR "Error adding half extent %d\n", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 3 * 1024 * 1024, 1 * 1024 * 1024);
	if (ret) {
		printk(KERN_ERR "Error removing tail end %d\n", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 0, 1 * 1024 * 1024);
	if (ret) {
		printk(KERN_ERR "Error removing front end %d\n", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 2 * 1024 * 1024, 4096);
	if (ret) {
		printk(KERN_ERR "Error removing middle piece %d\n", ret);
		return ret;
	}

	if (check_exists(cache, 0, 1 * 1024 * 1024)) {
		printk(KERN_ERR "Still have space at the front\n");
		return -1;
	}

	if (check_exists(cache, 2 * 1024 * 1024, 4096)) {
		printk(KERN_ERR "Still have space in the middle\n");
		return -1;
	}

	if (check_exists(cache, 3 * 1024 * 1024, 1 * 1024 * 1024)) {
		printk(KERN_ERR "Still have space at the end\n");
		return -1;
	}

	/* Cleanup */
	__btrfs_remove_free_space_cache(cache->free_space_ctl);

	return 0;
}
static int test_bitmaps(struct btrfs_block_group_cache *cache)
{
	u64 next_bitmap_offset;
	int ret;

	printk(KERN_ERR "Running bitmap only tests\n");

	ret = add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1);
	if (ret) {
		printk(KERN_ERR "Couldn't create a bitmap entry %d\n", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 0, 4 * 1024 * 1024);
	if (ret) {
		printk(KERN_ERR "Error removing bitmap full range %d\n", ret);
		return ret;
	}

	if (check_exists(cache, 0, 4 * 1024 * 1024)) {
		printk(KERN_ERR "Left some space in bitmap\n");
		return -1;
	}

	ret = add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1);
	if (ret) {
		printk(KERN_ERR "Couldn't add to our bitmap entry %d\n", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 2 * 1024 * 1024);
	if (ret) {
		printk(KERN_ERR "Couldn't remove middle chunk %d\n", ret);
		return ret;
	}

	/*
	 * The first bitmap we have starts at offset 0 so the next one is just
	 * at the end of the first bitmap.
	 */
	next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);

	/* Test a bit straddling two bitmaps */
	ret = add_free_space_entry(cache, next_bitmap_offset -
				   (2 * 1024 * 1024), 4 * 1024 * 1024, 1);
	if (ret) {
		printk(KERN_ERR "Couldn't add space that straddles two bitmaps"
		       " %d\n", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, next_bitmap_offset -
				      (1 * 1024 * 1024), 2 * 1024 * 1024);
	if (ret) {
		printk(KERN_ERR "Couldn't remove overlapping space %d\n", ret);
		return ret;
	}

	if (check_exists(cache, next_bitmap_offset - (1 * 1024 * 1024),
			 2 * 1024 * 1024)) {
		printk(KERN_ERR "Left some space when removing overlapping\n");
		return -1;
	}

	__btrfs_remove_free_space_cache(cache->free_space_ctl);

	return 0;
}
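
/*
 * For reference, with the 4096 byte sectorsize assumed by these tests and
 * 4K pages, next_bitmap_offset above is 32768 * 4096 = 128MiB, i.e. the
 * second bitmap starts 128MiB into the block group.
 */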
/* This is the high grade jackassery */
static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
{
	u64 bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
	int ret;

	printk(KERN_ERR "Running bitmap and extent tests\n");

	/*
	 * First let's do something simple, an extent at the same offset as
	 * the bitmap, but the free space completely in the extent and then
	 * completely in the bitmap.
	 */
	ret = add_free_space_entry(cache, 4 * 1024 * 1024, 1 * 1024 * 1024, 1);
	if (ret) {
		printk(KERN_ERR "Couldn't create bitmap entry %d\n", ret);
		return ret;
	}

	ret = add_free_space_entry(cache, 0, 1 * 1024 * 1024, 0);
	if (ret) {
		printk(KERN_ERR "Couldn't add extent entry %d\n", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 0, 1 * 1024 * 1024);
	if (ret) {
		printk(KERN_ERR "Couldn't remove extent entry %d\n", ret);
		return ret;
	}

	if (check_exists(cache, 0, 1 * 1024 * 1024)) {
		printk(KERN_ERR "Left remnants after our remove\n");
		return -1;
	}

	/* Now to add back the extent entry and remove from the bitmap */
	ret = add_free_space_entry(cache, 0, 1 * 1024 * 1024, 0);
	if (ret) {
		printk(KERN_ERR "Couldn't re-add extent entry %d\n", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 4 * 1024 * 1024, 1 * 1024 * 1024);
	if (ret) {
		printk(KERN_ERR "Couldn't remove from bitmap %d\n", ret);
		return ret;
	}

	if (check_exists(cache, 4 * 1024 * 1024, 1 * 1024 * 1024)) {
		printk(KERN_ERR "Left remnants in the bitmap\n");
		return -1;
	}

	/*
	 * Ok so a little more evil, extent entry and bitmap at the same
	 * offset, removing an overlapping chunk.
	 */
	ret = add_free_space_entry(cache, 1 * 1024 * 1024, 4 * 1024 * 1024, 1);
	if (ret) {
		printk(KERN_ERR "Couldn't add to a bitmap %d\n", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 512 * 1024, 3 * 1024 * 1024);
	if (ret) {
		printk(KERN_ERR "Couldn't remove overlapping space %d\n", ret);
		return ret;
	}

	if (check_exists(cache, 512 * 1024, 3 * 1024 * 1024)) {
		printk(KERN_ERR "Left over pieces after removing "
		       "overlapping\n");
		return -1;
	}

	__btrfs_remove_free_space_cache(cache->free_space_ctl);

	/* Now with the extent entry offset into the bitmap */
	ret = add_free_space_entry(cache, 4 * 1024 * 1024, 4 * 1024 * 1024, 1);
	if (ret) {
		printk(KERN_ERR "Couldn't add space to the bitmap %d\n", ret);
		return ret;
	}

	ret = add_free_space_entry(cache, 2 * 1024 * 1024, 2 * 1024 * 1024, 0);
	if (ret) {
		printk(KERN_ERR "Couldn't add extent to the cache %d\n", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 3 * 1024 * 1024, 4 * 1024 * 1024);
	if (ret) {
		printk(KERN_ERR "Problem removing overlapping space %d\n", ret);
		return ret;
	}

	if (check_exists(cache, 3 * 1024 * 1024, 4 * 1024 * 1024)) {
		printk(KERN_ERR "Left something behind when removing space");
		return -1;
	}

	/*
	 * This has blown up in the past, the extent entry starts before the
	 * bitmap entry, but we're trying to remove an offset that falls
	 * completely within the bitmap range and is in both the extent entry
	 * and the bitmap entry, looks like this
	 *
	 * [ extent ]
	 *      [ bitmap ]
	 *        [ del ]
	 */
	__btrfs_remove_free_space_cache(cache->free_space_ctl);
	ret = add_free_space_entry(cache, bitmap_offset + 4 * 1024 * 1024,
				   4 * 1024 * 1024, 1);
	if (ret) {
		printk(KERN_ERR "Couldn't add bitmap %d\n", ret);
		return ret;
	}

	ret = add_free_space_entry(cache, bitmap_offset - 1 * 1024 * 1024,
				   5 * 1024 * 1024, 0);
	if (ret) {
		printk(KERN_ERR "Couldn't add extent entry %d\n", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, bitmap_offset + 1 * 1024 * 1024,
				      5 * 1024 * 1024);
	if (ret) {
		printk(KERN_ERR "Failed to free our space %d\n", ret);
		return ret;
	}

	if (check_exists(cache, bitmap_offset + 1 * 1024 * 1024,
			 5 * 1024 * 1024)) {
		printk(KERN_ERR "Left stuff over\n");
		return -1;
	}

	__btrfs_remove_free_space_cache(cache->free_space_ctl);

	/*
	 * This blew up before, we have part of the free space in a bitmap and
	 * then the entirety of the rest of the space in an extent.  This used
	 * to return -EAGAIN back from btrfs_remove_extent, make sure this
	 * doesn't happen.
	 */
	ret = add_free_space_entry(cache, 1 * 1024 * 1024, 2 * 1024 * 1024, 1);
	if (ret) {
		printk(KERN_ERR "Couldn't add bitmap entry %d\n", ret);
		return ret;
	}

	ret = add_free_space_entry(cache, 3 * 1024 * 1024, 1 * 1024 * 1024, 0);
	if (ret) {
		printk(KERN_ERR "Couldn't add extent entry %d\n", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 3 * 1024 * 1024);
	if (ret) {
		printk(KERN_ERR "Error removing bitmap and extent "
		       "overlapping %d\n", ret);
		return ret;
	}

	__btrfs_remove_free_space_cache(cache->free_space_ctl);
	return 0;
}
void btrfs_test_free_space_cache(void)
{
	struct btrfs_block_group_cache *cache;

	printk(KERN_ERR "Running btrfs free space cache tests\n");

	cache = init_test_block_group();
	if (!cache) {
		printk(KERN_ERR "Couldn't run the tests\n");
		return;
	}

	if (test_extents(cache))
		goto out;
	if (test_bitmaps(cache))
		goto out;
	if (test_bitmaps_and_extents(cache))
		goto out;
out:
	__btrfs_remove_free_space_cache(cache->free_space_ctl);
	kfree(cache->free_space_ctl);
	kfree(cache);
	printk(KERN_ERR "Free space cache tests finished\n");
}
#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */