/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include "kerncompat.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "extent_io.h"

/*
 * Kernel always uses PAGE_CACHE_SIZE for sectorsize, but we don't have
 * anything like that in userspace and have to get the value from the
 * superblock.
 */
#define BITS_PER_BITMAP(sectorsize)	((sectorsize) * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
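/*
 * Example (illustrative, not part of the original source): with a 4KiB
 * sectorsize, BITS_PER_BITMAP(4096) == 32768, and since every bit tracks
 * one sectorsize unit, a single bitmap entry covers
 * 32768 * 4096 bytes == 128MiB of a block group.
 */
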
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void merge_space_tree(struct btrfs_free_space_ctl *ctl);

struct io_ctl {
	void *cur, *orig;
	void *buffer;
	struct btrfs_root *root;
	unsigned long size;
	u64 total_size;
	int index;
	int num_pages;
	unsigned check_crcs:1;
};

static int io_ctl_init(struct io_ctl *io_ctl, u64 size, u64 ino,
		       struct btrfs_root *root)
{
	memset(io_ctl, 0, sizeof(struct io_ctl));
	io_ctl->num_pages = (size + root->sectorsize - 1) / root->sectorsize;
	io_ctl->buffer = kzalloc(size, GFP_NOFS);
	if (!io_ctl->buffer)
		return -ENOMEM;
	io_ctl->total_size = size;
	io_ctl->root = root;
	if (ino != BTRFS_FREE_INO_OBJECTID)
		io_ctl->check_crcs = 1;
	return 0;
}

static void io_ctl_free(struct io_ctl *io_ctl)
{
	kfree(io_ctl->buffer);
}

static void io_ctl_unmap_page(struct io_ctl *io_ctl)
{
	io_ctl->cur = NULL;
	io_ctl->orig = NULL;
}

static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
	BUG_ON(io_ctl->index >= io_ctl->num_pages);
	io_ctl->cur = io_ctl->buffer + (io_ctl->index++ * io_ctl->root->sectorsize);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = io_ctl->root->sectorsize;
	if (clear)
		memset(io_ctl->cur, 0, io_ctl->root->sectorsize);
}

static void io_ctl_drop_pages(struct io_ctl *io_ctl)
{
	io_ctl_unmap_page(io_ctl);
}

static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct btrfs_root *root,
				struct btrfs_path *path, u64 ino)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 bytenr, len;
	u64 total_read = 0;
	int ret = 0;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		fprintf(stderr,
			"Couldn't find file extent item for free space inode"
			" %llu\n", (unsigned long long)ino);
		btrfs_release_path(path);
		return -EINVAL;
	}

	while (total_read < io_ctl->total_size) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				ret = -EINVAL;
				break;
			}
		}
		leaf = path->nodes[0];

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != ino) {
			ret = -EINVAL;
			break;
		}

		if (key.type != BTRFS_EXTENT_DATA_KEY) {
			ret = -EINVAL;
			break;
		}

		fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(path->nodes[0], fi) !=
		    BTRFS_FILE_EXTENT_REG) {
			fprintf(stderr, "Not the file extent type we wanted\n");
			ret = -EINVAL;
			break;
		}

		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi) +
			btrfs_file_extent_offset(leaf, fi);
		len = btrfs_file_extent_num_bytes(leaf, fi);
		ret = read_data_from_disk(root->fs_info,
					  io_ctl->buffer + key.offset, bytenr,
					  len, 0);
		if (ret)
			break;
		total_read += len;
		path->slots[0]++;
	}

	btrfs_release_path(path);
	return ret;
}

static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		printk("btrfs: space cache generation "
		       "(%Lu) does not match inode (%Lu)\n", *gen,
		       generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

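/*
 * Layout sketch (illustrative): when check_crcs is set the cache file
 * starts with one u32 checksum per page, immediately followed by the u64
 * generation; without crcs only a single u64 sits in front of the
 * generation.  The "cur"/"size" adjustments above skip exactly that
 * header before the generation is compared.
 */
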
static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = io_ctl->buffer;
	tmp += index;
	val = *tmp;

	io_ctl_map_page(io_ctl, 0);
	crc = crc32c(crc, io_ctl->orig + offset, io_ctl->root->sectorsize - offset);
	btrfs_csum_final(crc, (char *)&crc);
	if (val != crc) {
		printk("btrfs: csum mismatch on free space cache\n");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

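/*
 * Note (illustrative): the crc32c()/btrfs_csum_final() pair above covers
 * the page contents that follow the crc array on the first page
 * (offset = sizeof(u32) * num_pages) and the whole page otherwise, so a
 * stale or corrupted cache page is rejected before any entry is parsed.
 */
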
static int io_ctl_read_entry(struct io_ctl *io_ctl,
			     struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	memcpy(entry->bitmap, io_ctl->cur, io_ctl->root->sectorsize);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int __load_free_space_cache(struct btrfs_root *root,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	struct io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_key inode_location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space *e, *n;
	struct list_head bitmaps;
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u64 inode_size;
	u8 type;
	int ret = 0;

	INIT_LIST_HEAD(&bitmaps);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.type = 0;
	key.offset = offset;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		return 0;
	} else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&inode_location, &disk_key);
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &inode_location, path, 0, 0);
	if (ret) {
		fprintf(stderr, "Couldn't find free space inode %d\n", ret);
		return 0;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	inode_size = btrfs_inode_size(leaf, inode_item);
	if (!inode_size || !btrfs_inode_generation(leaf, inode_item)) {
		btrfs_release_path(path);
		return 0;
	}

	if (btrfs_inode_generation(leaf, inode_item) != generation) {
		fprintf(stderr,
			"free space inode generation (%llu) did not match "
			"free space cache generation (%llu)\n",
			(unsigned long long)btrfs_inode_generation(leaf,
								   inode_item),
			(unsigned long long)generation);
		btrfs_release_path(path);
		return 0;
	}

	btrfs_release_path(path);

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode_size, inode_location.objectid, root);
	if (ret)
		return ret;

	ret = io_ctl_prepare_pages(&io_ctl, root, path,
				   inode_location.objectid);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;
	while (num_entries) {
		e = calloc(1, sizeof(*e));
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			free(e);
			goto free_cache;
		}

		if (!e->bytes) {
			free(e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			ret = link_free_space(ctl, e);
			if (ret) {
				fprintf(stderr,
					"Duplicate entries in free space cache\n");
				free(e);
				goto free_cache;
			}
		} else {
			BUG_ON(!num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(ctl->sectorsize, GFP_NOFS);
			if (!e->bitmap) {
				free(e);
				goto free_cache;
			}
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			if (ret) {
				fprintf(stderr,
					"Duplicate entries in free space cache\n");
				free(e->bitmap);
				free(e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * We add the bitmaps at the end of the entries in order that
	 * the bitmap entries are added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}

int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_path *path;
	u64 used = btrfs_block_group_used(&block_group->item);
	int ret = 0;
	int matched;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	ret = __load_free_space_cache(fs_info->tree_root, ctl, path,
				      block_group->key.objectid);
	btrfs_free_path(path);

	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	if (ret == 1 && !matched) {
		__btrfs_remove_free_space_cache(ctl);
		fprintf(stderr,
			"block group %llu has wrong amount of free space\n",
			block_group->key.objectid);
		ret = -1;
	}

	if (ret < 0) {
		ret = 0;
		fprintf(stderr,
			"failed to load free space cache for block group %llu\n",
			block_group->key.objectid);
	}

	return ret;
}

static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	BUG_ON(offset < bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(offset / unit);
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(bytes / unit);
}
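/*
 * Worked example (illustrative): with unit == 4096, a bitmap entry that
 * starts at bitmap_start == 1GiB stores offset 1GiB + 12KiB at bit
 * offset_to_bit(1GiB, 4096, 1GiB + 12288) == 3, and an 8KiB request asks
 * for bytes_to_bits(8192, 4096) == 2 consecutive bits.
 */
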
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap)
					return -EEXIST;
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap)
					return -EEXIST;
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
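/*
 * Example of the tie-break above (illustrative): if an extent entry and a
 * bitmap entry both start at the same offset, the extent entry ends up to
 * the left of the bitmap entry, so an in-order walk (or a non-bitmap
 * lookup in tree_search_offset()) finds the cheaper extent entry first,
 * and inserting a second entry of the same kind reports -EEXIST.
 */
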
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;
	u32 sectorsize = ctl->sectorsize;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			BUG_ON(entry->offset > offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP(sectorsize) * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP(sectorsize) *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}

void unlink_free_space(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	BUG_ON(!info->bitmap && !info->bytes);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}

static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;
	u32 sectorsize = ctl->sectorsize;

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP(sectorsize)) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP(sectorsize), i);
		if ((next_zero - i) >= bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	return -1;
}
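/*
 * Example (illustrative): with unit == 4096 and bits 3..6 of the bitmap
 * set, a call asking for *bytes == 8192 starting at the bitmap's offset
 * succeeds, rewriting *offset to bitmap start + 3 * 4096 and *bytes to
 * 4 * 4096, i.e. the whole contiguous run that satisfied the request.
 */
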
struct btrfs_free_space *
btrfs_find_free_space(struct btrfs_free_space_ctl *ctl, u64 offset, u64 bytes)
{
	return tree_search_offset(ctl, offset, 0, 0);
}

static void try_merge_free_space(struct btrfs_free_space_ctl *ctl,
				 struct btrfs_free_space *info)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		free(right_info);
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		free(left_info);
	}
}
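/*
 * Example (illustrative): adding [8MiB, +1MiB) while extent entries
 * [7MiB, +1MiB) and [9MiB, +1MiB) already exist unlinks both neighbours
 * and leaves a single [7MiB, +3MiB) entry, so the offset tree always
 * keeps one entry per contiguous free range.
 */
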
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes && !block_group->ro)
			count++;
		printk("entry offset %llu, bytes %llu, bitmap %s\n",
		       (unsigned long long)info->offset,
		       (unsigned long long)info->bytes,
		       (info->bitmap) ? "yes" : "no");
	}
	printk("%d blocks of free space at or bigger than bytes is\n", count);
}

int btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group,
			      int sectorsize)
{
	struct btrfs_free_space_ctl *ctl;

	ctl = calloc(1, sizeof(*ctl));
	if (!ctl)
		return -ENOMEM;

	ctl->sectorsize = sectorsize;
	ctl->unit = sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	block_group->free_space_ctl = ctl;

	return 0;
}

void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		unlink_free_space(ctl, info);
		free(info->bitmap);
		free(info);
	}
}

void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	__btrfs_remove_free_space_cache(block_group->free_space_ctl);
}

int btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, u64 offset,
			 u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = calloc(1, sizeof(*info));
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;

	try_merge_free_space(ctl, info);

	ret = link_free_space(ctl, info);
	if (ret) {
		printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
		BUG_ON(ret == -EEXIST);
	}

	return ret;
}

/*
 * Merges all the free space cache and kills the bitmap entries since we just
 * want to use the free space cache to verify it's correct, no reason to keep
 * the bitmaps around to confuse things.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;
	int ret;
	u32 sectorsize = ctl->sectorsize;

again:
	prev = NULL;
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (e->bitmap) {
			u64 offset = e->offset, bytes = ctl->unit;
			u64 end;

			end = e->offset + (u64)(BITS_PER_BITMAP(sectorsize) * ctl->unit);

			unlink_free_space(ctl, e);
			while (!(search_bitmap(ctl, e, &offset, &bytes))) {
				ret = btrfs_add_free_space(ctl, offset,
							   bytes);
				BUG_ON(ret);

				offset += bytes;
				if (offset >= end)
					break;
				bytes = ctl->unit;
			}
			free(e->bitmap);
			free(e);
			goto again;
		}
		if (prev && prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			free(e);
			link_free_space(ctl, prev);
);