/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
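/*
 * For example, with 4K pages BITS_PER_BITMAP is 4096 * 8 = 32768, so a
 * single bitmap page tracks 32768 allocation units; at a 4K sectorsize
 * that is 32768 * 4096 = 128MiB of disk covered per bitmap.
 */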
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	inode->i_mapping->flags &= ~__GFP_FS;

	return inode;
}

struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		printk(KERN_INFO "Old style space inode found, converting.\n");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}

int __create_free_space_inode(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path, u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}

int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}

int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct inode *inode)
{
	struct btrfs_block_rsv *rsv;
	u64 needed_bytes;
	loff_t oldsize;
	int ret = 0;

	rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->global_block_rsv;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
		btrfs_calc_trans_metadata_size(root, 1);

	spin_lock(&trans->block_rsv->lock);
	if (trans->block_rsv->reserved < needed_bytes) {
		spin_unlock(&trans->block_rsv->lock);
		trans->block_rsv = rsv;
		return -ENOSPC;
	}
	spin_unlock(&trans->block_rsv->lock);

	oldsize = i_size_read(inode);
	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, oldsize, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret) {
		trans->block_rsv = rsv;
		WARN_ON(1);
		return ret;
	}

	ret = btrfs_update_inode(trans, root, inode);
	trans->block_rsv = rsv;

	return ret;
}

static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}

struct io_ctl {
	void *cur, *orig;
	struct page *page;
	struct page **pages;
	struct btrfs_root *root;
	unsigned long size;
	int index;
	int num_pages;
	unsigned check_crcs:1;
};
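/*
 * io_ctl walks the cache inode page by page: 'cur' is the read/write
 * cursor inside the currently kmap()'d page, 'orig' marks the start of
 * that page, and 'index'/'num_pages' track progress through 'pages'.
 * When check_crcs is set (every cache inode except the free-ino cache),
 * the first page begins with one u32 crc per page of the file.
 */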
static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
		       struct btrfs_root *root)
{
	memset(io_ctl, 0, sizeof(struct io_ctl));
	io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;
	io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
				GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;
	io_ctl->root = root;
	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
		io_ctl->check_crcs = 1;
	return 0;
}

static void io_ctl_free(struct io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
}

static void io_ctl_unmap_page(struct io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		kunmap(io_ctl->page);
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}

static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
	WARN_ON(io_ctl->cur);
	BUG_ON(io_ctl->index >= io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = kmap(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_CACHE_SIZE;
	if (clear)
		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}

static void io_ctl_drop_pages(struct io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		ClearPageChecked(io_ctl->pages[i]);
		unlock_page(io_ctl->pages[i]);
		page_cache_release(io_ctl->pages[i]);
	}
}

static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				printk(KERN_ERR "btrfs: error reading free "
				       "space cache\n");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	return 0;
}

static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}
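/*
 * With crcs enabled the first page therefore looks like (for a cache
 * file of N pages):
 *
 *	[u32 crc] * N | le64 generation | free space entries ...
 *
 * io_ctl->size is reduced by the crc array and the generation, so the
 * entry-packing code below only sees the space that is really left.
 */
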
static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		printk_ratelimited(KERN_ERR "btrfs: space cache generation "
				   "(%Lu) does not match inode (%Lu)\n", *gen,
				   generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = kmap(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
	kunmap(io_ctl->pages[0]);
}

static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = kmap(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;
	kunmap(io_ctl->pages[0]);

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	if (val != crc) {
		printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free "
				   "space cache\n");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}
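/*
 * A sketch of the packing math above, assuming 4K pages: each on-disk
 * btrfs_free_space_entry is two le64s plus a type byte (17 bytes,
 * packed), so after the header of the first page roughly
 * (4096 - 8 - 4 * num_pages) / 17 entries fit before the next page has
 * to be mapped.
 */
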
static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct io_ctl *io_ctl,
			     struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	return io_ctl_check_crc(io_ctl, io_ctl->index);
}

static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	if (io_ctl->cur && io_ctl->cur != io_ctl->orig)
		io_ctl_unmap_page(io_ctl);

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	struct list_head bitmaps;
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	INIT_LIST_HEAD(&bitmaps);

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (BTRFS_I(inode)->generation != generation) {
		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
		       " not match free space cache generation (%llu)\n",
		       (unsigned long long)BTRFS_I(inode)->generation,
		       (unsigned long long)generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	io_ctl_init(&io_ctl, inode, root);
	ret = readahead_cache(inode);
	if (ret)
		goto out;

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				printk(KERN_ERR "Duplicate entries in "
				       "free space cache, dumping\n");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			BUG_ON(!num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				printk(KERN_ERR "Duplicate entries in "
				       "free space cache, dumping\n");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * We add the bitmaps at the end of the entries in order that
	 * the bitmap entries are added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}

int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		printk(KERN_ERR "block group %llu has a wrong amount of free "
		       "space\n", block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		printk(KERN_ERR "btrfs: failed to load free space cache "
		       "for block group %llu\n", block_group->key.objectid);
	}

	iput(inode);
	return ret;
}

/*
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * and -1 if it was not.
 */
int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct rb_node *node;
	struct list_head *pos, *n;
	struct extent_state *cached_state = NULL;
	struct btrfs_free_cluster *cluster = NULL;
	struct extent_io_tree *unpin = NULL;
	struct io_ctl io_ctl;
	struct list_head bitmap_list;
	struct btrfs_key key;
	u64 start, end, len;
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int err = -1;

	INIT_LIST_HEAD(&bitmap_list);

	if (!i_size_read(inode))
		return -1;

	io_ctl_init(&io_ctl, inode, root);

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list))
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);

	/*
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one.
	 */
	unpin = root->fs_info->pinned_extents;

	/* Lock all pages first so we can lock the extent safely. */
	io_ctl_prepare_pages(&io_ctl, inode, 0);

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state, GFP_NOFS);

	/*
	 * When searching for pinned extents, we need to start at our start
	 * offset.
	 */
	if (block_group)
		start = block_group->key.objectid;

	node = rb_first(&ctl->free_space_offset);
	if (!node && cluster) {
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Make sure we can fit our crcs into the first page */
	if (io_ctl.check_crcs &&
	    (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) {
		WARN_ON(1);
		goto out_nospc;
	}

	io_ctl_set_generation(&io_ctl, trans->transid);

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		entries++;

		ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto out_nospc;

		if (e->bitmap) {
			list_add_tail(&e->list, &bitmap_list);
			bitmaps++;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster = NULL;
		}
	}

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space.
	 */
	while (block_group && (start < block_group->key.objectid +
			       block_group->key.offset)) {
		ret = find_first_extent_bit(unpin, start, &start, &end,
					    EXTENT_DIRTY);
		if (ret) {
			ret = 0;
			break;
		}

		/* This pinned extent is out of our range */
		if (start >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		len = block_group->key.objectid +
			block_group->key.offset - start;
		len = min(len, end + 1 - start);

		entries++;
		ret = io_ctl_add_entry(&io_ctl, start, len, NULL);
		if (ret)
			goto out_nospc;

		start = end + 1;
	}

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, &bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);
		if (ret)
			goto out_nospc;
		list_del_init(&entry->list);
	}

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(&io_ctl);

	ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
				0, i_size_read(inode), &cached_state);
	io_ctl_drop_pages(&io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	if (ret)
		goto out;

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret)
		goto out;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);
		goto out;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		BUG_ON(!path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
					 NULL, GFP_NOFS);
			btrfs_release_path(path);
			goto out;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	err = 0;
out:
	io_ctl_free(&io_ctl);
	if (err) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	return err;

out_nospc:
	list_for_each_safe(pos, n, &bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);
		list_del_init(&entry->list);
	}
	io_ctl_drop_pages(&io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
	goto out;
}

int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
				      path, block_group->key.objectid);
	if (ret) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);
		ret = 0;

		printk(KERN_ERR "btrfs: failed to write free space cache "
		       "for block group %llu\n", block_group->key.objectid);
	}

	iput(inode);
	return ret;
}

static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	BUG_ON(offset < bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}
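/*
 * Worked example, assuming a 4K unit: bytes_per_bitmap is 32768 * 4096 =
 * 128MiB, so for a ctl->start of 1GiB an offset of 1GiB + 200MiB maps to
 * the bitmap whose range starts at 1GiB + 128MiB.
 */
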
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap)
					return -EEXIST;
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap)
					return -EEXIST;
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
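/*
 * Concretely: if an extent entry and a bitmap entry both start at offset
 * X, the extent ends up as the in-order earlier (left) node and the
 * bitmap to its right, while inserting a second entry of the same kind
 * at X fails with -EEXIST.
 */
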
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = &entry->offset_index;
			while (1) {
				n = rb_prev(n);
				if (!n)
					break;

				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap) {
					if (prev->offset + prev->bytes > offset)
						entry = prev;
					break;
				}
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			BUG_ON(entry->offset > offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = &entry->offset_index;
		while (1) {
			n = rb_prev(n);
			if (!n)
				break;

			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap) {
				if (prev->offset + prev->bytes > offset)
					return prev;
				break;
			}
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}

static void __unlink_free_space(struct btrfs_free_space_ctl *ctl,
				struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	BUG_ON(!info->bitmap && !info->bytes);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}

static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	BUG_ON(ctl->total_bitmaps > max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div64_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

	ctl->extents_thresh =
		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
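/*
 * Worked example, assuming 4K pages and a 1GiB block group: max_bytes is
 * 32K; with one bitmap in use bitmap_bytes = 2 * 4K = 8K, so extent_bytes
 * = min(32K - 8K, 32K / 2) = 16K and extents_thresh ends up around
 * 16K / sizeof(struct btrfs_free_space) entries.
 */
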
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}

static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		if ((next_zero - i) >= bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	return -1;
}
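/*
 * For example, a search for *bytes = 8K with a 4K unit needs bits = 2,
 * so the loop above only stops on a run of at least two consecutive set
 * bits; *offset and *bytes are then rewritten to describe the run found.
 */
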
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		return NULL;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		return NULL;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes)
			continue;

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, offset, bytes);
			if (!ret)
				return entry;
			continue;
		}

		*offset = entry->offset;
		*bytes = entry->bytes;
		return entry;
	}

	return NULL;
}

static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}

static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *bitmap_info,
			       u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * XXX - this can go away after a few releases.
	 *
	 * since the only user of btrfs_remove_free_space is the tree logging
	 * stuff, and the only way to test that is under crash conditions, we
	 * want to have this debug stuff here just in case something's not
	 * working.  Search the bitmap for the space we are trying to use to
	 * make sure its actually there.  If its not there then we need to stop
	 * because something has gone wrong.
	 */
	search_start = *offset;
	search_bytes = *bytes;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
	BUG_ON(ret < 0 || search_start != *offset);

	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
		*bytes -= end - *offset + 1;
		*offset = end + 1;
	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
		*bytes = 0;
	}

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = *bytes;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}

static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	return bytes_to_set;
}

static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them to larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * some block groups are so tiny they can't be enveloped by a bitmap, so
	 * don't even bother to create a bitmap for this
	 */
	if (BITS_PER_BITMAP * block_group->sectorsize >
	    block_group->key.offset)
		return false;

	return true;
}
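/*
 * With a 4K sectorsize this means: extents of 16K or less only get their
 * own extent entry while at least half of the extent slots are still
 * free; anything bigger stays an extent entry until the threshold is
 * exhausted.
 */
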
static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		BUG_ON(added);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}

static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
			  struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}

int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * There was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent, so
	 * before we do that see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
		BUG_ON(ret == -EEXIST);
	}

	return ret;
}

*block_group
,
1825 u64 offset
, u64 bytes
)
1827 struct btrfs_free_space_ctl
*ctl
= block_group
->free_space_ctl
;
1828 struct btrfs_free_space
*info
;
1829 struct btrfs_free_space
*next_info
= NULL
;
1832 spin_lock(&ctl
->tree_lock
);
1835 info
= tree_search_offset(ctl
, offset
, 0, 0);
1838 * oops didn't find an extent that matched the space we wanted
1839 * to remove, look for a bitmap instead
1841 info
= tree_search_offset(ctl
, offset_to_bitmap(ctl
, offset
),
1849 if (info
->bytes
< bytes
&& rb_next(&info
->offset_index
)) {
1851 next_info
= rb_entry(rb_next(&info
->offset_index
),
1852 struct btrfs_free_space
,
1855 if (next_info
->bitmap
)
1856 end
= next_info
->offset
+
1857 BITS_PER_BITMAP
* ctl
->unit
- 1;
1859 end
= next_info
->offset
+ next_info
->bytes
;
1861 if (next_info
->bytes
< bytes
||
1862 next_info
->offset
> offset
|| offset
> end
) {
1863 printk(KERN_CRIT
"Found free space at %llu, size %llu,"
1864 " trying to use %llu\n",
1865 (unsigned long long)info
->offset
,
1866 (unsigned long long)info
->bytes
,
1867 (unsigned long long)bytes
);
1876 if (info
->bytes
== bytes
) {
1877 unlink_free_space(ctl
, info
);
1879 kfree(info
->bitmap
);
1880 ctl
->total_bitmaps
--;
1882 kmem_cache_free(btrfs_free_space_cachep
, info
);
1887 if (!info
->bitmap
&& info
->offset
== offset
) {
1888 unlink_free_space(ctl
, info
);
1889 info
->offset
+= bytes
;
1890 info
->bytes
-= bytes
;
1891 ret
= link_free_space(ctl
, info
);
1896 if (!info
->bitmap
&& info
->offset
<= offset
&&
1897 info
->offset
+ info
->bytes
>= offset
+ bytes
) {
1898 u64 old_start
= info
->offset
;
1900 * we're freeing space in the middle of the info,
1901 * this can happen during tree log replay
1903 * first unlink the old info and then
1904 * insert it again after the hole we're creating
1906 unlink_free_space(ctl
, info
);
1907 if (offset
+ bytes
< info
->offset
+ info
->bytes
) {
1908 u64 old_end
= info
->offset
+ info
->bytes
;
1910 info
->offset
= offset
+ bytes
;
1911 info
->bytes
= old_end
- info
->offset
;
1912 ret
= link_free_space(ctl
, info
);
1917 /* the hole we're creating ends at the end
1918 * of the info struct, just free the info
1920 kmem_cache_free(btrfs_free_space_cachep
, info
);
1922 spin_unlock(&ctl
->tree_lock
);
1924 /* step two, insert a new info struct to cover
1925 * anything before the hole
1927 ret
= btrfs_add_free_space(block_group
, old_start
,
1928 offset
- old_start
);
1933 ret
= remove_from_bitmap(ctl
, info
, &offset
, &bytes
);
1938 spin_unlock(&ctl
->tree_lock
);
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes)
			count++;
		printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
		       (unsigned long long)info->offset,
		       (unsigned long long)info->bytes,
		       (info->bitmap) ? "yes" : "no");
	}
	printk(KERN_INFO "block group has cluster?: %s\n",
	       list_empty(&block_group->cluster_list) ? "no" : "yes");
	printk(KERN_INFO "%d blocks of free space at or bigger than bytes\n",
	       count);
}

void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = block_group->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = ((1024 * 32) / 2) /
				sizeof(struct btrfs_free_space);
}
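/*
 * That is 16K of entry space per block group before bitmap conversion
 * kicks in; at roughly 64 bytes per struct btrfs_free_space on 64-bit
 * (an rb_node, a list_head, two u64s and a pointer) this allows on the
 * order of 256 extent entries.  The exact count depends on the kernel
 * config; this is only a back-of-the-envelope sketch.
 */
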
/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap)
			try_merge_free_space(ctl, entry, false);
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}

void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
}

void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}

void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}

u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		entry->offset += bytes;
		entry->bytes -= bytes;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}

out:
	spin_unlock(&ctl->tree_lock);

	return ret;
}

/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}

static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
	if (err)
		return 0;

	ret = search_start;
	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}

/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      min_start);
			if (ret == 0) {
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}

static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long search_bits;
	unsigned long total_bits;
	unsigned long found_bits;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;
	bool found = false;

	i = offset_to_bit(entry->offset, block_group->sectorsize,
			  max_t(u64, offset, entry->offset));
	search_bits = bytes_to_bits(bytes, block_group->sectorsize);
	total_bits = bytes_to_bits(min_bytes, block_group->sectorsize);

again:
	found_bits = 0;
	for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= search_bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (!found_bits)
		return -ENOSPC;

	if (!found) {
		start = i;
		found = true;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * block_group->sectorsize)
		cluster->max_size = found_bits * block_group->sectorsize;

	if (total_found < total_bits) {
		i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
		if (i - start > total_bits * 2) {
			total_found = 0;
			cluster->max_size = 0;
			found = false;
		}
		goto again;
	}

	cluster->window_start = start * block_group->sectorsize +
		entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	BUG_ON(ret);

	return 0;
}

/*
 * This searches the block group for just extents to fill the cluster with.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *prev = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_start;
	u64 window_free;
	u64 max_extent;
	u64 max_gap = 128 * 1024;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap) {
		if (list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_start = entry->offset;
	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;
	prev = entry;

	while (window_free <= min_bytes) {
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		/*
		 * we haven't filled the empty size and the window is
		 * very large.  reset and try again
		 */
		if (entry->offset - (prev->offset + prev->bytes) > max_gap ||
		    entry->offset - window_start > (min_bytes * 2)) {
			first = entry;
			window_start = entry->offset;
			window_free = entry->bytes;
			last = entry;
			max_extent = entry->bytes;
		} else {
			last = entry;
			window_free += entry->bytes;
			if (entry->bytes > max_extent)
				max_extent = entry->bytes;
		}
		prev = entry;
	}

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		BUG_ON(ret);
	} while (node && entry != last);

	cluster->max_size = max_extent;

	return 0;
}

/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = -ENOSPC;

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * First check our cached list of bitmaps and see if there is an entry
	 * here that will work.
	 */
	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < min_bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * If we do have entries on our list and we are here then we didn't find
	 * anything, so go ahead and get the next entry after the last entry in
	 * this list and start the search from there.
	 */
	if (!list_empty(bitmaps)) {
		entry = list_entry(bitmaps->prev, struct btrfs_free_space,
				   list);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		goto search;
	}

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
	if (!entry)
		return -ENOSPC;

search:
	node = &entry->offset_index;
	do {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (!entry->bitmap)
			continue;
		if (entry->bytes < min_bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, min_bytes);
	} while (ret && node);

	return ret;
}
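/*
 * Editorial note (not original text): the order above is cheapest first:
 * bitmaps already gathered on the temporary list by the extent pass are
 * tried before the rbtree is walked, and the rbtree walk resumes after the
 * last listed bitmap rather than rescanning entries that were already
 * rejected.
 */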
/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes free and up to empty_size + bytes free.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up the cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct list_head bitmaps;
	struct btrfs_free_space *entry, *tmp;
	u64 min_bytes;
	int ret;

	/* for metadata, allow allocations with more holes */
	if (btrfs_test_opt(root, SSD_SPREAD)) {
		min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		/*
		 * we want to do larger allocations when we are
		 * flushing out the delayed refs, it helps prevent
		 * making more work as we go along.
		 */
		if (trans->transaction->delayed_refs.flushing)
			min_bytes = max(bytes, (bytes + empty_size) >> 1);
		else
			min_bytes = max(bytes, (bytes + empty_size) >> 4);
	} else
		min_bytes = max(bytes, (bytes + empty_size) >> 2);

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < min_bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	INIT_LIST_HEAD(&bitmaps);
	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}
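/*
 * Example of the min_bytes heuristic above (illustrative only): a request
 * of bytes = 1M with empty_size = 3M gives min_bytes = 4M under ssd_spread,
 * 2M for metadata while delayed refs are flushing ((1M + 3M) >> 1), 1M for
 * metadata otherwise (max(1M, 4M >> 4)), and 1M for data (max(1M, 4M >> 2)).
 */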
/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 bytes = 0;
	u64 actually_trimmed;
	int ret = 0;

	*trimmed = 0;

	while (start < end) {
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry)
			entry = tree_search_offset(ctl,
						   offset_to_bitmap(ctl, start),
						   1, 1);

		if (!entry || entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, &start, &bytes);
			if (!ret) {
				if (start >= end) {
					spin_unlock(&ctl->tree_lock);
					break;
				}
				bytes = min(bytes, end - start);
				bitmap_clear_bits(ctl, entry, start, bytes);
				if (entry->bytes == 0)
					free_bitmap(ctl, entry);
			} else {
				start = entry->offset + BITS_PER_BITMAP *
					block_group->sectorsize;
				spin_unlock(&ctl->tree_lock);
				ret = 0;
				continue;
			}
		} else {
			start = entry->offset;
			bytes = min(entry->bytes, end - start);
			unlink_free_space(ctl, entry);
			kmem_cache_free(btrfs_free_space_cachep, entry);
		}

		spin_unlock(&ctl->tree_lock);

		if (bytes >= minlen) {
			struct btrfs_space_info *space_info;
			int update = 0;

			space_info = block_group->space_info;
			spin_lock(&space_info->lock);
			spin_lock(&block_group->lock);
			if (!block_group->ro) {
				block_group->reserved += bytes;
				space_info->bytes_reserved += bytes;
				update = 1;
			}
			spin_unlock(&block_group->lock);
			spin_unlock(&space_info->lock);

			ret = btrfs_error_discard_extent(fs_info->extent_root,
							 start,
							 bytes,
							 &actually_trimmed);

			btrfs_add_free_space(block_group, start, bytes);
			if (update) {
				spin_lock(&space_info->lock);
				spin_lock(&block_group->lock);
				if (block_group->ro)
					space_info->bytes_readonly += bytes;
				block_group->reserved -= bytes;
				space_info->bytes_reserved -= bytes;
				spin_unlock(&space_info->lock);
				spin_unlock(&block_group->lock);
			}

			if (ret)
				break;
			*trimmed += actually_trimmed;
		}
		start += bytes;
		bytes = 0;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
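/*
 * Editorial summary (not original text): each chunk is pulled out of the
 * free space tree, temporarily accounted as reserved so the allocator
 * cannot hand it out mid-discard, discarded, and then re-added with
 * btrfs_add_free_space() before the reservation is dropped.
 */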
/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count);
		BUG_ON(ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}
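/*
 * Worked example (illustrative, not from the original source): if the
 * left-most entry is the extent [256, 266), this returns ino 256 and
 * shrinks the entry to [257, 266); if the left-most entry is a bitmap,
 * search_bitmap() locates the first set bit and only that one bit is
 * cleared.
 */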
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->cache_lock);
	if (root->cache_inode)
		inode = igrab(root->cache_inode);
	spin_unlock(&root->cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->cache_inode = igrab(inode);
	spin_unlock(&root->cache_lock);

	return inode;
}
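/*
 * Editorial note (not original text): the cached inode is reused under
 * cache_lock, and a freshly looked-up inode is only stashed in
 * root->cache_inode while the filesystem is not closing, so unmount does
 * not race with taking a new long-lived reference.
 */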
int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}
int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on
	 * the normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		printk(KERN_ERR "btrfs: failed to load free ino cache for "
		       "root %llu\n", root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}
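/*
 * Editorial note (not original text): the root_gen check above throws away
 * a cache whose inode generation does not match the root item's current
 * generation, i.e. a stale cache from an earlier transaction is ignored
 * rather than loaded.
 */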
int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct inode *inode;
	int ret;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
	if (ret) {
		btrfs_delalloc_release_metadata(inode, inode->i_size);
		printk(KERN_ERR "btrfs: failed to write free ino cache "
		       "for root %llu\n", root->root_key.objectid);
	}

	iput(inode);
	return ret;
}
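/*
 * Editorial note (not original text): on a failed cache write the delalloc
 * metadata reservation taken for the cache inode is released by hand here,
 * since the writeback path that would normally consume it never ran.
 */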