Btrfs: Make the code for reading/writing free space cache generic
fs/btrfs/free-space-cache.c
/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"
#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
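/*
 * Each bitmap entry is backed by one page of memory, so a single bitmap
 * covers BITS_PER_BITMAP * ctl->unit bytes of disk space: with 4K pages
 * and a 4K sector size that is 32768 bits, or 128MiB per bitmap.
 * MAX_CACHE_BYTES_PER_GIG is the in-memory budget enforced by
 * recalculate_thresholds() below: at most 32K of cache structures per
 * GiB of block group.
 */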
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(root, path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(root, path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	inode->i_mapping->flags &= ~__GFP_FS;

	return inode;
}
struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!root->fs_info->closing) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}
int __create_free_space_inode(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path, u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS |
			      BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(root, path);
		return ret;
	}
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);

	return 0;
}
int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct inode *inode)
{
	loff_t oldsize;
	int ret = 0;

	trans->block_rsv = root->orphan_block_rsv;
	ret = btrfs_block_rsv_check(trans, root,
				    root->orphan_block_rsv,
				    0, 5);
	if (ret)
		return ret;

	oldsize = i_size_read(inode);
	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, oldsize, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret) {
		WARN_ON(1);
		return ret;
	}

	return btrfs_update_inode(trans, root, inode);
}
static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}
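/*
 * On-disk layout of the cache file, as read back below and written out by
 * __btrfs_write_out_cache(): page 0 begins with an array of one u32 crc
 * per page of the file, followed by a u64 generation; the entries start
 * at first_page_offset, right after the generation. Every page holds
 * either a run of packed struct btrfs_free_space_entry records (extent
 * entries and bitmap headers) or the raw PAGE_CACHE_SIZE-sized bitmap
 * belonging to a previously seen bitmap entry, in list order.
 */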
int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct page *page;
	u32 *checksums = NULL, *crc;
	char *disk_crcs = NULL;
	struct btrfs_key key;
	struct list_head bitmaps;
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u32 cur_crc = ~(u32)0;
	pgoff_t index = 0;
	unsigned long first_page_offset;
	int num_checksums;
	int ret = 0, ret2;

	INIT_LIST_HEAD(&bitmaps);

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		goto out;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	else if (ret > 0) {
		btrfs_release_path(root, path);
		ret = 0;
		goto out;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(root, path);

	if (BTRFS_I(inode)->generation != generation) {
		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
		       " not match free space cache generation (%llu)\n",
		       (unsigned long long)BTRFS_I(inode)->generation,
		       (unsigned long long)generation);
		goto out;
	}

	if (!num_entries)
		goto out;

	/* Setup everything for doing checksumming */
	num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE;
	checksums = crc = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS);
	if (!checksums)
		goto out;
	first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
	disk_crcs = kzalloc(first_page_offset, GFP_NOFS);
	if (!disk_crcs)
		goto out;

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_free_space_entry *entry;
		struct btrfs_free_space *e;
		void *addr;
		unsigned long offset = 0;
		unsigned long start_offset = 0;
		int need_loop = 0;

		if (!num_entries && !num_bitmaps)
			break;

		if (index == 0) {
			start_offset = first_page_offset;
			offset = start_offset;
		}

		page = grab_cache_page(inode->i_mapping, index);
		if (!page)
			goto free_cache;

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				printk(KERN_ERR "btrfs: error reading free "
				       "space cache\n");
				goto free_cache;
			}
		}
		addr = kmap(page);

		if (index == 0) {
			u64 *gen;

			memcpy(disk_crcs, addr, first_page_offset);
			gen = addr + (sizeof(u32) * num_checksums);
			if (*gen != BTRFS_I(inode)->generation) {
				printk(KERN_ERR "btrfs: space cache generation"
				       " (%llu) does not match inode (%llu)\n",
				       (unsigned long long)*gen,
				       (unsigned long long)
				       BTRFS_I(inode)->generation);
				kunmap(page);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}
			crc = (u32 *)disk_crcs;
		}
		entry = addr + start_offset;

		/* First let's check our crc before we do anything fun */
		cur_crc = ~(u32)0;
		cur_crc = btrfs_csum_data(root, addr + start_offset, cur_crc,
					  PAGE_CACHE_SIZE - start_offset);
		btrfs_csum_final(cur_crc, (char *)&cur_crc);
		if (cur_crc != *crc) {
			printk(KERN_ERR "btrfs: crc mismatch for page %lu\n",
			       index);
			kunmap(page);
			unlock_page(page);
			page_cache_release(page);
			goto free_cache;
		}
		crc++;

		while (1) {
			if (!num_entries)
				break;

			need_loop = 1;
			e = kmem_cache_zalloc(btrfs_free_space_cachep,
					      GFP_NOFS);
			if (!e) {
				kunmap(page);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}

			e->offset = le64_to_cpu(entry->offset);
			e->bytes = le64_to_cpu(entry->bytes);
			if (!e->bytes) {
				kunmap(page);
				kmem_cache_free(btrfs_free_space_cachep, e);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}

			if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
				spin_lock(&ctl->tree_lock);
				ret = link_free_space(ctl, e);
				spin_unlock(&ctl->tree_lock);
				BUG_ON(ret);
			} else {
				e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
				if (!e->bitmap) {
					kunmap(page);
					kmem_cache_free(
						btrfs_free_space_cachep, e);
					unlock_page(page);
					page_cache_release(page);
					goto free_cache;
				}
				spin_lock(&ctl->tree_lock);
				ret2 = link_free_space(ctl, e);
				ctl->total_bitmaps++;
				ctl->op->recalc_thresholds(ctl);
				spin_unlock(&ctl->tree_lock);
				list_add_tail(&e->list, &bitmaps);
			}

			num_entries--;
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				break;
			entry++;
		}

		/*
		 * We read an entry out of this page, so we need to move on to
		 * the next page.
		 */
		if (need_loop) {
			kunmap(page);
			goto next;
		}

		/*
		 * We add the bitmaps at the end of the entries, in the order
		 * that the bitmap entries were added to the cache.
		 */
		e = list_entry(bitmaps.next, struct btrfs_free_space, list);
		list_del_init(&e->list);
		memcpy(e->bitmap, addr, PAGE_CACHE_SIZE);
		kunmap(page);
		num_bitmaps--;
next:
		unlock_page(page);
		page_cache_release(page);
		index++;
	}

	ret = 1;
out:
	kfree(checksums);
	kfree(disk_crcs);
	return ret;
free_cache:
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}
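/*
 * Sanity check on a loaded cache: the amount of free space read back must
 * equal the block group size minus the allocated bytes and the super
 * mirror stripes. If it does not, the cache is inconsistent, so it is
 * thrown away and the block group is marked for a rewrite.
 */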
int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If we're unmounting then just return, since this does a search on
	 * the normal root and not the commit root and we could deadlock.
	 */
	smp_mb();
	if (fs_info->closing)
		return 0;

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		printk(KERN_ERR "block group %llu has a wrong amount of free "
		       "space\n", block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);

		printk(KERN_ERR "btrfs: failed to load free space cache "
		       "for block group %llu\n", block_group->key.objectid);
	}

	iput(inode);
	return ret;
}
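/*
 * Write the in-memory free space tree back to the cache inode: extent
 * entries from the rbtree (plus any entries still sitting in a cluster),
 * then pinned extents that will become free once the running transaction
 * commits, then one page per bitmap. Every page gets a crc32c, and page 0
 * is finished last with the checksum array and the transaction id before
 * the free space header item is updated with the entry counts.
 */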
int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct rb_node *node;
	struct list_head *pos, *n;
	struct page **pages;
	struct page *page;
	struct extent_state *cached_state = NULL;
	struct btrfs_free_cluster *cluster = NULL;
	struct extent_io_tree *unpin = NULL;
	struct list_head bitmap_list;
	struct btrfs_key key;
	u64 start, end, len;
	u64 bytes = 0;
	u32 *crc, *checksums;
	unsigned long first_page_offset;
	int index = 0, num_pages = 0;
	int entries = 0;
	int bitmaps = 0;
	int ret = -1;
	bool next_page = false;
	bool out_of_space = false;

	INIT_LIST_HEAD(&bitmap_list);

	node = rb_first(&ctl->free_space_offset);
	if (!node)
		return 0;

	if (!i_size_read(inode))
		return -1;

	num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;
	filemap_write_and_wait(inode->i_mapping);
	btrfs_wait_ordered_range(inode, inode->i_size &
				 ~(root->sectorsize - 1), (u64)-1);

	/* We need a checksum per page. */
	crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
	if (!crc)
		return -1;

	pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
	if (!pages) {
		kfree(crc);
		return -1;
	}

	/* Since the first page has all of our checksums and our generation we
	 * need to calculate the offset into the page that we can start writing
	 * our entries.
	 */
	first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list))
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);

	/*
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = root->fs_info->pinned_extents;

	/*
	 * Lock all pages first so we can lock the extent safely.
	 *
	 * NOTE: Because we hold the ref the entire time we're going to write to
	 * the page find_get_page should never fail, so we don't do a check
	 * after find_get_page at this point. Just putting this here so people
	 * know and don't freak out.
	 */
	while (index < num_pages) {
		page = grab_cache_page(inode->i_mapping, index);
		if (!page) {
			int i;

			/* only the pages grabbed so far are locked and held */
			for (i = 0; i < index; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			goto out_free;
		}
		pages[index] = page;
		index++;
	}

	index = 0;
	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state, GFP_NOFS);

	/*
	 * When searching for pinned extents, we need to start at our start
	 * offset.
	 */
	if (block_group)
		start = block_group->key.objectid;

	/* Write out the extent entries */
	do {
		struct btrfs_free_space_entry *entry;
		void *addr;
		unsigned long offset = 0;
		unsigned long start_offset = 0;

		next_page = false;

		if (index == 0) {
			start_offset = first_page_offset;
			offset = start_offset;
		}

		if (index >= num_pages) {
			out_of_space = true;
			break;
		}

		page = pages[index];

		addr = kmap(page);
		entry = addr + start_offset;

		memset(addr, 0, PAGE_CACHE_SIZE);
		while (node && !next_page) {
			struct btrfs_free_space *e;

			e = rb_entry(node, struct btrfs_free_space, offset_index);
			entries++;

			entry->offset = cpu_to_le64(e->offset);
			entry->bytes = cpu_to_le64(e->bytes);
			if (e->bitmap) {
				entry->type = BTRFS_FREE_SPACE_BITMAP;
				list_add_tail(&e->list, &bitmap_list);
				bitmaps++;
			} else {
				entry->type = BTRFS_FREE_SPACE_EXTENT;
			}
			node = rb_next(node);
			if (!node && cluster) {
				node = rb_first(&cluster->root);
				cluster = NULL;
			}
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				next_page = true;
			entry++;
		}

		/*
		 * We want to add any pinned extents to our free space cache
		 * so we don't leak the space
		 */
		while (block_group && !next_page &&
		       (start < block_group->key.objectid +
			block_group->key.offset)) {
			ret = find_first_extent_bit(unpin, start, &start, &end,
						    EXTENT_DIRTY);
			if (ret) {
				ret = 0;
				break;
			}

			/* This pinned extent is out of our range */
			if (start >= block_group->key.objectid +
			    block_group->key.offset)
				break;

			len = block_group->key.objectid +
				block_group->key.offset - start;
			len = min(len, end + 1 - start);

			entries++;
			entry->offset = cpu_to_le64(start);
			entry->bytes = cpu_to_le64(len);
			entry->type = BTRFS_FREE_SPACE_EXTENT;

			start = end + 1;
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				next_page = true;
			entry++;
		}
		*crc = ~(u32)0;
		*crc = btrfs_csum_data(root, addr + start_offset, *crc,
				       PAGE_CACHE_SIZE - start_offset);
		kunmap(page);

		btrfs_csum_final(*crc, (char *)crc);
		crc++;

		bytes += PAGE_CACHE_SIZE;

		index++;
	} while (node || next_page);

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, &bitmap_list) {
		void *addr;
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		if (index >= num_pages) {
			out_of_space = true;
			break;
		}
		page = pages[index];

		addr = kmap(page);
		memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
		*crc = ~(u32)0;
		*crc = btrfs_csum_data(root, addr, *crc, PAGE_CACHE_SIZE);
		kunmap(page);
		btrfs_csum_final(*crc, (char *)crc);
		crc++;
		bytes += PAGE_CACHE_SIZE;

		list_del_init(&entry->list);
		index++;
	}

	if (out_of_space) {
		btrfs_drop_pages(pages, num_pages);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
				     i_size_read(inode) - 1, &cached_state,
				     GFP_NOFS);
		ret = 0;
		goto out_free;
	}

	/* Zero out the rest of the pages just to make sure */
	while (index < num_pages) {
		void *addr;

		page = pages[index];
		addr = kmap(page);
		memset(addr, 0, PAGE_CACHE_SIZE);
		kunmap(page);
		bytes += PAGE_CACHE_SIZE;
		index++;
	}

	/* Write the checksums and trans id to the first page */
	{
		void *addr;
		u64 *gen;

		page = pages[0];

		addr = kmap(page);
		memcpy(addr, checksums, sizeof(u32) * num_pages);
		gen = addr + (sizeof(u32) * num_pages);
		*gen = trans->transid;
		kunmap(page);
	}

	ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
				bytes, &cached_state);
	btrfs_drop_pages(pages, num_pages);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	if (ret) {
		ret = 0;
		goto out_free;
	}

	BTRFS_I(inode)->generation = trans->transid;

	filemap_write_and_wait(inode->i_mapping);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
	if (ret < 0) {
		ret = -1;
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
		goto out_free;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		BUG_ON(!path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			ret = -1;
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC |
					 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
					 GFP_NOFS);
			btrfs_release_path(root, path);
			goto out_free;
		}
	}
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);

	ret = 1;

out_free:
	if (ret != 1) {
		invalidate_inode_pages2_range(inode->i_mapping, 0, index);
		BTRFS_I(inode)->generation = 0;
	}
	kfree(checksums);
	kfree(pages);
	btrfs_update_inode(trans, root, inode);
	return ret;
}
int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
				      path, block_group->key.objectid);
	if (ret < 0) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		printk(KERN_ERR "btrfs: failed to write free space cache "
		       "for block group %llu\n", block_group->key.objectid);
	}

	iput(inode);
	return ret;
}
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	BUG_ON(offset < bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}
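/*
 * Worked example for offset_to_bitmap(), assuming 4K pages and a 4K unit:
 * bytes_per_bitmap = 32768 * 4096 = 128MiB, so an offset of
 * ctl->start + 200MiB rounds down to ctl->start + 128MiB, the start of
 * the second bitmap covering this space.
 */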
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * a bitmap entry and an extent entry can share the
			 * same offset. If this is the case, we want the
			 * extent entry to always be found first if we do a
			 * linear search through the tree, since we want to
			 * have the quickest allocation time, and allocating
			 * from an extent is faster than allocating from a
			 * bitmap. So if we're inserting a bitmap and we find
			 * an entry at this offset, we want to go right, or
			 * after this entry logically. If we are inserting an
			 * extent and we've found a bitmap, we want to go
			 * left, or before logically.
			 */
			if (bitmap) {
				WARN_ON(info->bitmap);
				p = &(*p)->rb_right;
			} else {
				WARN_ON(!info->bitmap);
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we
 * just want a section that is at least 'bytes' in size and comes at or after
 * the given offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = &entry->offset_index;
			while (1) {
				n = rb_prev(n);
				if (!n)
					break;
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap) {
					if (prev->offset + prev->bytes > offset)
						entry = prev;
					break;
				}
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			BUG_ON(entry->offset > offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = &entry->offset_index;
		while (1) {
			n = rb_prev(n);
			if (!n)
				break;
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap) {
				if (prev->offset + prev->bytes > offset)
					return prev;
				break;
			}
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}
static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	BUG_ON(!info->bitmap && !info->bytes);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	BUG_ON(ctl->total_bitmaps > max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div64_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

	ctl->extents_thresh =
		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
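/*
 * Worked example, assuming 4K pages and a 1GiB block group with one bitmap
 * already in use: max_bytes = 32768; bitmap_bytes = (1 + 1) * 4096 = 8192;
 * extent_bytes = min(32768 - 8192, 32768 / 2) = 16384; so extents_thresh
 * ends up at 16384 / sizeof(struct btrfs_free_space) extent entries before
 * new small extents start being folded into bitmaps.
 */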
static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		if ((next_zero - i) >= bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	return -1;
}
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		return NULL;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		return NULL;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes)
			continue;

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, offset, bytes);
			if (!ret)
				return entry;
			continue;
		}

		*offset = entry->offset;
		*bytes = entry->bytes;
		return entry;
	}

	return NULL;
}
static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *bitmap_info,
				       u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * XXX - this can go away after a few releases.
	 *
	 * since the only user of btrfs_remove_free_space is the tree logging
	 * stuff, and the only way to test that is under crash conditions, we
	 * want to have this debug stuff here just in case something's not
	 * working. Search the bitmap for the space we are trying to use to
	 * make sure it's actually there. If it's not there then we need to
	 * stop because something has gone wrong.
	 */
	search_start = *offset;
	search_bytes = *bytes;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
	BUG_ON(ret < 0 || search_start != *offset);

	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
		*bytes -= end - *offset + 1;
		*offset = end + 1;
	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
		*bytes = 0;
	}

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = *bytes;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}
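/*
 * Policy implemented by use_bitmap() below: small extents (up to four
 * sectors) are only pushed into bitmaps once half of the extent entry slots
 * are used, so the cheap-to-allocate extent entries stay reserved for larger
 * free ranges, and block groups smaller than a single bitmap's coverage
 * never use bitmaps at all.
 */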
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them for larger extents; however, if we have
		 * plenty of cache left then go ahead and add them, no sense in
		 * adding the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * some block groups are so tiny they can't be enveloped by a bitmap, so
	 * don't even bother to create a bitmap for this
	 */
	if (BITS_PER_BITMAP * block_group->sectorsize >
	    block_group->key.offset)
		return false;

	return true;
}
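/*
 * insert_into_bitmap() below retries in a loop: find the bitmap covering
 * info->offset and set as many bits as fit; if bytes remain past the end of
 * that bitmap, move on to the next one; if no covering bitmap exists, turn
 * the pre-allocated info into a fresh bitmap entry (or allocate a new one,
 * dropping tree_lock around the allocation) and try again.
 */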
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	int added = 0;
	u64 bytes, offset, end;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

again:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		BUG_ON(added);
		goto new_bitmap;
	}

	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	if (offset >= bitmap_info->offset && offset + bytes > end) {
		bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
		bytes -= end - offset;
		offset = end;
		added = 0;
	} else if (offset >= bitmap_info->offset && offset + bytes <= end) {
		bitmap_set_bits(ctl, bitmap_info, offset, bytes);
		bytes = 0;
	} else {
		BUG();
	}

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}
bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
			  struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding; if there is, remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}
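/*
 * Merge example: with free extents [7M, +1M) and [9M, +1M) in the tree,
 * adding [8M, +1M) finds [9M, ...) as right_info and [7M, ...) as left_info,
 * unlinks both, and leaves a single [7M, +3M) extent to be linked by the
 * caller.
 */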
int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * If there was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent, so
	 * before we do that see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		printk(KERN_CRIT "btrfs: unable to add free space: %d\n", ret);
		BUG_ON(ret == -EEXIST);
	}

	return ret;
}
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct btrfs_free_space *next_info = NULL;
	int ret = 0;

	spin_lock(&ctl->tree_lock);

again:
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			WARN_ON(1);
			goto out_lock;
		}
	}

	if (info->bytes < bytes && rb_next(&info->offset_index)) {
		u64 end;
		next_info = rb_entry(rb_next(&info->offset_index),
				     struct btrfs_free_space,
				     offset_index);

		if (next_info->bitmap)
			end = next_info->offset +
			      BITS_PER_BITMAP * ctl->unit - 1;
		else
			end = next_info->offset + next_info->bytes;

		if (next_info->bytes < bytes ||
		    next_info->offset > offset || offset > end) {
			printk(KERN_CRIT "Found free space at %llu, size %llu,"
			       " trying to use %llu\n",
			       (unsigned long long)info->offset,
			       (unsigned long long)info->bytes,
			       (unsigned long long)bytes);
			WARN_ON(1);
			ret = -EINVAL;
			goto out_lock;
		}

		info = next_info;
	}

	if (info->bytes == bytes) {
		unlink_free_space(ctl, info);
		if (info->bitmap) {
			kfree(info->bitmap);
			ctl->total_bitmaps--;
		}
		kmem_cache_free(btrfs_free_space_cachep, info);
		goto out_lock;
	}

	if (!info->bitmap && info->offset == offset) {
		unlink_free_space(ctl, info);
		info->offset += bytes;
		info->bytes -= bytes;
		link_free_space(ctl, info);
		goto out_lock;
	}

	if (!info->bitmap && info->offset <= offset &&
	    info->offset + info->bytes >= offset + bytes) {
		u64 old_start = info->offset;
		/*
		 * we're freeing space in the middle of the info,
		 * this can happen during tree log replay
		 *
		 * first unlink the old info and then
		 * insert it again after the hole we're creating
		 */
		unlink_free_space(ctl, info);
		if (offset + bytes < info->offset + info->bytes) {
			u64 old_end = info->offset + info->bytes;

			info->offset = offset + bytes;
			info->bytes = old_end - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;
		} else {
			/* the hole we're creating ends at the end
			 * of the info struct, just free the info
			 */
			kmem_cache_free(btrfs_free_space_cachep, info);
		}
		spin_unlock(&ctl->tree_lock);

		/* step two, insert a new info struct to cover
		 * anything before the hole
		 */
		ret = btrfs_add_free_space(block_group, old_start,
					   offset - old_start);
		WARN_ON(ret);
		goto out;
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN)
		goto again;
	BUG_ON(ret);
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes)
			count++;
		printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
		       (unsigned long long)info->offset,
		       (unsigned long long)info->bytes,
		       (info->bitmap) ? "yes" : "no");
	}
	printk(KERN_INFO "block group has cluster?: %s\n",
	       list_empty(&block_group->cluster_list) ? "no" : "yes");
	printk(KERN_INFO "%d blocks of free space at or bigger than bytes\n",
	       count);
}
static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = block_group->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = ((1024 * 32) / 2) /
				sizeof(struct btrfs_free_space);
}
/*
 * for a given cluster, put all of its extents back into the free
 * space cache. If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already. In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap)
			try_merge_free_space(ctl, entry, false);
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}
void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	spin_lock(&ctl->tree_lock);
	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		unlink_free_space(ctl, info);
		kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
	spin_unlock(&ctl->tree_lock);
}

void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
	spin_unlock(&ctl->tree_lock);

	__btrfs_remove_free_space_cache(ctl);
}
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		entry->offset += bytes;
		entry->bytes -= bytes;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ret;
}
/*
 * given a cluster, put all of its extents back into the free space
 * cache. If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it, don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
	if (err)
		return 0;

	ret = search_start;
	bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}
/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			struct rb_node *node;

			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      min_start);
			if (ret == 0) {
				struct rb_node *node;
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}
static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long search_bits;
	unsigned long total_bits;
	unsigned long found_bits;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;
	bool found = false;

	i = offset_to_bit(entry->offset, block_group->sectorsize,
			  max_t(u64, offset, entry->offset));
	search_bits = bytes_to_bits(bytes, block_group->sectorsize);
	total_bits = bytes_to_bits(min_bytes, block_group->sectorsize);

again:
	found_bits = 0;
	for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= search_bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (!found_bits)
		return -ENOSPC;

	if (!found) {
		start = i;
		found = true;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * block_group->sectorsize)
		cluster->max_size = found_bits * block_group->sectorsize;

	if (total_found < total_bits) {
		i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
		if (i - start > total_bits * 2) {
			total_found = 0;
			cluster->max_size = 0;
			found = false;
		}
		goto again;
	}

	cluster->window_start = start * block_group->sectorsize +
		entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	BUG_ON(ret);

	return 0;
}
/*
 * This searches the block group for just extents to fill the cluster with.
 */
static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   u64 offset, u64 bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *prev = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_start;
	u64 window_free;
	u64 max_extent;
	u64 max_gap = 128 * 1024;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap) {
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_start = entry->offset;
	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;
	prev = entry;

	while (window_free <= min_bytes) {
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap)
			continue;
		/*
		 * we haven't filled the empty size and the window is
		 * very large.  reset and try again
		 */
		if (entry->offset - (prev->offset + prev->bytes) > max_gap ||
		    entry->offset - window_start > (min_bytes * 2)) {
			first = entry;
			window_start = entry->offset;
			window_free = entry->bytes;
			last = entry;
			max_extent = entry->bytes;
		} else {
			last = entry;
			window_free += entry->bytes;
			if (entry->bytes > max_extent)
				max_extent = entry->bytes;
		}
		prev = entry;
	}

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		BUG_ON(ret);
	} while (node && entry != last);

	cluster->max_size = max_extent;

	return 0;
}
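
/*
 * Illustration of the window rules above (hypothetical numbers): with
 * min_bytes = 256 KiB, an extent that starts more than 128 KiB (max_gap)
 * past the end of the previous extent, or more than 512 KiB
 * (min_bytes * 2) past window_start, restarts the window at that extent;
 * otherwise its bytes are added to window_free, and the walk stops once
 * window_free exceeds min_bytes.
 */
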
/*
 * This specifically looks for bitmaps that may work in the cluster; we assume
 * that we have already failed to find extents that will work.
 */
static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = -ENOSPC;

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
	if (!entry)
		return -ENOSPC;

	node = &entry->offset_index;
	do {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (!entry->bitmap)
			continue;
		if (entry->bytes < min_bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, min_bytes);
	} while (ret && node);

	return ret;
}
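
/*
 * In short: the scan starts at the bitmap entry covering 'offset'
 * (offset_to_bitmap rounds down to a bitmap boundary) and walks the
 * rbtree forward, trying btrfs_bitmap_cluster() on every bitmap with at
 * least min_bytes free, until one succeeds or the tree runs out.
 */
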
/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes free and up to empty_size + bytes free.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	u64 min_bytes;
	int ret;

	/* for metadata, allow allocations with more holes */
	if (btrfs_test_opt(root, SSD_SPREAD)) {
		min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		/*
		 * we want to do larger allocations when we are
		 * flushing out the delayed refs, it helps prevent
		 * making more work as we go along.
		 */
		if (trans->transaction->delayed_refs.flushing)
			min_bytes = max(bytes, (bytes + empty_size) >> 1);
		else
			min_bytes = max(bytes, (bytes + empty_size) >> 4);
	} else
		min_bytes = max(bytes, (bytes + empty_size) >> 2);
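
	/*
	 * Worked example with made-up numbers: for a metadata block group
	 * with bytes = 64 KiB and empty_size = 960 KiB (so bytes +
	 * empty_size = 1 MiB), min_bytes is max(64K, 1M >> 1) = 512 KiB
	 * while delayed refs are flushing, but only max(64K, 1M >> 4) =
	 * 64 KiB otherwise; a data block group would use
	 * max(64K, 1M >> 2) = 256 KiB.
	 */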

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < min_bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes,
				      min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, offset,
					   bytes, min_bytes);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}
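
/*
 * Sketch of the expected calling pattern (an assumption based on how the
 * cluster API fits together, not code from this file): the caller
 * serializes refills with cluster->refill_lock, refills on demand, and
 * then carves allocations out of the cluster:
 *
 *	spin_lock(&cluster->refill_lock);
 *	ret = btrfs_find_space_cluster(trans, root, block_group, cluster,
 *				       search_start, num_bytes, empty_size);
 *	if (!ret)
 *		offset = btrfs_alloc_from_cluster(block_group, cluster,
 *						  num_bytes, search_start);
 *	spin_unlock(&cluster->refill_lock);
 */
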
/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}

int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 bytes = 0;
	u64 actually_trimmed;
	int ret = 0;

	*trimmed = 0;

	while (start < end) {
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry)
			entry = tree_search_offset(ctl,
						   offset_to_bitmap(ctl, start),
						   1, 1);

		if (!entry || entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, &start, &bytes);
			if (!ret) {
				if (start >= end) {
					spin_unlock(&ctl->tree_lock);
					break;
				}
				bytes = min(bytes, end - start);
				bitmap_clear_bits(ctl, entry, start, bytes);
				if (entry->bytes == 0)
					free_bitmap(ctl, entry);
			} else {
				start = entry->offset + BITS_PER_BITMAP *
					block_group->sectorsize;
				spin_unlock(&ctl->tree_lock);
				ret = 0;
				continue;
			}
		} else {
			start = entry->offset;
			bytes = min(entry->bytes, end - start);
			unlink_free_space(ctl, entry);
			kfree(entry);
		}

		spin_unlock(&ctl->tree_lock);

		if (bytes >= minlen) {
			int update_ret;
			update_ret = btrfs_update_reserved_bytes(block_group,
								 bytes, 1, 1);

			ret = btrfs_error_discard_extent(fs_info->extent_root,
							 start,
							 bytes,
							 &actually_trimmed);

			btrfs_add_free_space(block_group, start, bytes);
			if (!update_ret)
				btrfs_update_reserved_bytes(block_group,
							    bytes, 0, 1);

			if (ret)
				break;
			*trimmed += actually_trimmed;
		}
		start += bytes;
		bytes = 0;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
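
/*
 * On bitmap coverage in the trim loop above (illustrative arithmetic):
 * BITS_PER_BITMAP is PAGE_CACHE_SIZE * 8, i.e. 32768 bits with 4 KiB
 * pages, so a single bitmap entry covers 32768 * sectorsize bytes --
 * 128 MiB with a 4096-byte sectorsize. That is the stride by which
 * 'start' is advanced when search_bitmap() finds nothing to trim in
 * the current bitmap.
 */
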
/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count);
		BUG_ON(ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}
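
/*
 * Example of the extent case above (hypothetical values): if the
 * left-most entry is the extent {offset = 256, bytes = 10}, the call
 * returns ino 256 and relinks the entry as {offset = 257, bytes = 9};
 * once bytes reaches zero the entry is freed instead of relinked.
 */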