Btrfs: Update and fix mount -o nodatacow
[linux-2.6/btrfs-unstable.git] / fs / btrfs / extent-tree.c
blob fe1ddbd2bfd68fb439477f4320f82461d4fe7dc0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include "hash.h"
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "ref-cache.h"

#define BLOCK_GROUP_DATA     EXTENT_WRITEBACK
#define BLOCK_GROUP_METADATA EXTENT_UPTODATE
#define BLOCK_GROUP_SYSTEM   EXTENT_NEW

#define BLOCK_GROUP_DIRTY EXTENT_DIRTY
static int finish_current_insert(struct btrfs_trans_handle *trans, struct
				 btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root);
static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
			 struct btrfs_block_group_cache *hint,
			 u64 search_start, int data, int owner);
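/*
 * the extent, chunk and dev roots are touched with alloc_mutex already
 * held, so these helpers only take the mutex for the other roots
 */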
void maybe_lock_mutex(struct btrfs_root *root)
{
	if (root != root->fs_info->extent_root &&
	    root != root->fs_info->chunk_root &&
	    root != root->fs_info->dev_root) {
		mutex_lock(&root->fs_info->alloc_mutex);
	}
}

void maybe_unlock_mutex(struct btrfs_root *root)
{
	if (root != root->fs_info->extent_root &&
	    root != root->fs_info->chunk_root &&
	    root != root->fs_info->dev_root) {
		mutex_unlock(&root->fs_info->alloc_mutex);
	}
}

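/*
 * walk the extent items of a block group and mark the holes between
 * them dirty in the free_space_cache, so allocation can use the cache
 * instead of searching the extent tree
 */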
static int cache_block_group(struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct extent_io_tree *free_space_cache;
	int slot;
	u64 last = 0;
	u64 hole_size;
	u64 first_free;
	int found = 0;

	if (!block_group)
		return 0;

	root = root->fs_info->extent_root;
	free_space_cache = &root->fs_info->free_space_cache;

	if (block_group->cached)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	/*
	 * we get into deadlocks with paths held by callers of this function.
	 * since the alloc_mutex is protecting things right now, just
	 * skip the locking here
	 */
	path->skip_locking = 1;
	first_free = block_group->key.objectid;
	key.objectid = block_group->key.objectid;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid + key.offset > first_free)
			first_free = key.objectid + key.offset;
	}
	while(1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			if (ret == 0) {
				continue;
			} else {
				break;
			}
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < block_group->key.objectid) {
			goto next;
		}
		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset) {
			break;
		}

		if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
			if (!found) {
				last = first_free;
				found = 1;
			}
			if (key.objectid > last) {
				hole_size = key.objectid - last;
				set_extent_dirty(free_space_cache, last,
						 last + hole_size - 1,
						 GFP_NOFS);
			}
			last = key.objectid + key.offset;
		}
next:
		path->slots[0]++;
	}

	if (!found)
		last = first_free;
	if (block_group->key.objectid +
	    block_group->key.offset > last) {
		hole_size = block_group->key.objectid +
			block_group->key.offset - last;
		set_extent_dirty(free_space_cache, last,
				 last + hole_size - 1, GFP_NOFS);
	}
	block_group->cached = 1;
err:
	btrfs_free_path(path);
	return 0;
}

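/* return the block group that starts at or after bytenr */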
struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
						       btrfs_fs_info *info,
						       u64 bytenr)
{
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *block_group = NULL;
	u64 ptr;
	u64 start;
	u64 end;
	int ret;

	bytenr = max_t(u64, bytenr,
		       BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
	block_group_cache = &info->block_group_cache;
	ret = find_first_extent_bit(block_group_cache,
				    bytenr, &start, &end,
				    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
				    BLOCK_GROUP_SYSTEM);
	if (ret) {
		return NULL;
	}
	ret = get_state_private(block_group_cache, start, &ptr);
	if (ret)
		return NULL;

	block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
	return block_group;
}

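/* return the block group that contains the given bytenr */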
struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
							 btrfs_fs_info *info,
							 u64 bytenr)
{
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *block_group = NULL;
	u64 ptr;
	u64 start;
	u64 end;
	int ret;

	bytenr = max_t(u64, bytenr,
		       BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
	block_group_cache = &info->block_group_cache;
	ret = find_first_extent_bit(block_group_cache,
				    bytenr, &start, &end,
				    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
				    BLOCK_GROUP_SYSTEM);
	if (ret) {
		return NULL;
	}
	ret = get_state_private(block_group_cache, start, &ptr);
	if (ret)
		return NULL;

	block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
	if (block_group->key.objectid <= bytenr && bytenr <
	    block_group->key.objectid + block_group->key.offset)
		return block_group;
	return NULL;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

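/*
 * search the free_space_cache for a free range of at least num bytes
 * in *cache_ret, moving on to later block groups (wrapping around at
 * most once) until something suitable is found
 */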
static int noinline find_search_start(struct btrfs_root *root,
			      struct btrfs_block_group_cache **cache_ret,
			      u64 *start_ret, u64 num, int data)
{
	int ret;
	struct btrfs_block_group_cache *cache = *cache_ret;
	struct extent_io_tree *free_space_cache;
	struct extent_state *state;
	u64 last;
	u64 start = 0;
	u64 cache_miss = 0;
	u64 total_fs_bytes;
	u64 search_start = *start_ret;
	int wrapped = 0;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	free_space_cache = &root->fs_info->free_space_cache;

	if (!cache)
		goto out;

again:
	ret = cache_block_group(root, cache);
	if (ret) {
		goto out;
	}

	last = max(search_start, cache->key.objectid);
	if (!block_group_bits(cache, data) || cache->ro)
		goto new_group;

	spin_lock_irq(&free_space_cache->lock);
	state = find_first_extent_bit_state(free_space_cache, last, EXTENT_DIRTY);
	while(1) {
		if (!state) {
			if (!cache_miss)
				cache_miss = last;
			spin_unlock_irq(&free_space_cache->lock);
			goto new_group;
		}

		start = max(last, state->start);
		last = state->end + 1;
		if (last - start < num) {
			do {
				state = extent_state_next(state);
			} while(state && !(state->state & EXTENT_DIRTY));
			continue;
		}
		spin_unlock_irq(&free_space_cache->lock);
		if (cache->ro) {
			goto new_group;
		}
		if (start + num > cache->key.objectid + cache->key.offset)
			goto new_group;
		if (!block_group_bits(cache, data)) {
			printk("block group bits don't match %Lu %d\n", cache->flags, data);
		}
		*start_ret = start;
		return 0;
	}
out:
	cache = btrfs_lookup_block_group(root->fs_info, search_start);
	if (!cache) {
		printk("Unable to find block group for %Lu\n", search_start);
		WARN_ON(1);
	}
	return -ENOSPC;

new_group:
	last = cache->key.objectid + cache->key.offset;
wrapped:
	cache = btrfs_lookup_first_block_group(root->fs_info, last);
	if (!cache || cache->key.objectid >= total_fs_bytes) {
no_cache:
		if (!wrapped) {
			wrapped = 1;
			last = search_start;
			goto wrapped;
		}
		goto out;
	}
	if (cache_miss && !cache->cached) {
		cache_block_group(root, cache);
		last = cache_miss;
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
	}
	cache_miss = 0;
	cache = btrfs_find_block_group(root, cache, last, data, 0);
	if (!cache)
		goto no_cache;
	*cache_ret = cache;
	goto again;
}

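/* return num scaled by factor tenths, i.e. num * factor / 10 */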
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

static int block_group_state_bits(u64 flags)
{
	int bits = 0;
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		bits |= BLOCK_GROUP_DATA;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		bits |= BLOCK_GROUP_METADATA;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		bits |= BLOCK_GROUP_SYSTEM;
	return bits;
}

static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
			 struct btrfs_block_group_cache *hint,
			 u64 search_start, int data, int owner)
{
	struct btrfs_block_group_cache *cache;
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *found_group = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 used;
	u64 last = 0;
	u64 start;
	u64 end;
	u64 free_check;
	u64 ptr;
	int bit;
	int ret;
	int full_search = 0;
	int factor = 10;
	int wrapped = 0;

	block_group_cache = &info->block_group_cache;

	if (data & BTRFS_BLOCK_GROUP_METADATA)
		factor = 9;

	bit = block_group_state_bits(data);

	if (search_start) {
		struct btrfs_block_group_cache *shint;
		shint = btrfs_lookup_first_block_group(info, search_start);
		if (shint && block_group_bits(shint, data) && !shint->ro) {
			spin_lock(&shint->lock);
			used = btrfs_block_group_used(&shint->item);
			if (used + shint->pinned <
			    div_factor(shint->key.offset, factor)) {
				spin_unlock(&shint->lock);
				return shint;
			}
			spin_unlock(&shint->lock);
		}
	}
	if (hint && !hint->ro && block_group_bits(hint, data)) {
		spin_lock(&hint->lock);
		used = btrfs_block_group_used(&hint->item);
		if (used + hint->pinned <
		    div_factor(hint->key.offset, factor)) {
			spin_unlock(&hint->lock);
			return hint;
		}
		spin_unlock(&hint->lock);
		last = hint->key.objectid + hint->key.offset;
	} else {
		if (hint)
			last = max(hint->key.objectid, search_start);
		else
			last = search_start;
	}
again:
	while(1) {
		ret = find_first_extent_bit(block_group_cache, last,
					    &start, &end, bit);
		if (ret)
			break;

		ret = get_state_private(block_group_cache, start, &ptr);
		if (ret) {
			last = end + 1;
			continue;
		}

		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if (!cache->ro && block_group_bits(cache, data)) {
			free_check = div_factor(cache->key.offset, factor);
			if (used + cache->pinned < free_check) {
				found_group = cache;
				spin_unlock(&cache->lock);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return found_group;
}

struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
						 struct btrfs_block_group_cache
						 *hint, u64 search_start,
						 int data, int owner)
{
	struct btrfs_block_group_cache *ret;
	ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
	return ret;
}

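/*
 * hash the fields of a backref into the 64 bit key offset; see the
 * "Back Reference Key hashing" notes in the long comment below
 */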
static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation,
			   u64 owner, u64 owner_offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(ref_generation);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	if (owner >= BTRFS_FIRST_FREE_OBJECTID) {
		lenum = cpu_to_le64(owner);
		low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
		lenum = cpu_to_le64(owner_offset);
		low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	}
	return ((u64)high_crc << 32) | (u64)low_crc;
}

static int match_extent_ref(struct extent_buffer *leaf,
			    struct btrfs_extent_ref *disk_ref,
			    struct btrfs_extent_ref *cpu_ref)
{
	int ret;
	int len;

	if (cpu_ref->objectid)
		len = sizeof(*cpu_ref);
	else
		len = 2 * sizeof(u64);
	ret = memcmp_extent_buffer(leaf, cpu_ref, (unsigned long)disk_ref,
				   len);
	return ret == 0;
}

static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, u64 bytenr,
					  u64 root_objectid,
					  u64 ref_generation, u64 owner,
					  u64 owner_offset, int del)
{
	u64 hash;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_extent_ref ref;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *disk_ref;
	int ret;
	int ret2;

	btrfs_set_stack_ref_root(&ref, root_objectid);
	btrfs_set_stack_ref_generation(&ref, ref_generation);
	btrfs_set_stack_ref_objectid(&ref, owner);
	btrfs_set_stack_ref_offset(&ref, owner_offset);

	hash = hash_extent_ref(root_objectid, ref_generation, owner,
			       owner_offset);
	key.offset = hash;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path,
					del ? -1 : 0, del);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		if (ret != 0) {
			u32 nritems = btrfs_header_nritems(leaf);
			if (path->slots[0] >= nritems) {
				ret2 = btrfs_next_leaf(root, path);
				if (ret2)
					goto out;
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
			if (found_key.objectid != bytenr ||
			    found_key.type != BTRFS_EXTENT_REF_KEY)
				goto out;
			key.offset = found_key.offset;
			if (del) {
				btrfs_release_path(root, path);
				continue;
			}
		}
		disk_ref = btrfs_item_ptr(path->nodes[0],
					  path->slots[0],
					  struct btrfs_extent_ref);
		if (match_extent_ref(path->nodes[0], disk_ref, &ref)) {
			ret = 0;
			goto out;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		key.offset = found_key.offset + 1;
		btrfs_release_path(root, path);
	}
out:
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume (in theory, not implemented yet)
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure has fields for:
 *
 * - Objectid of the subvolume root
 * - Generation number of the tree holding the reference
 * - objectid of the file holding the reference
 * - offset in the file corresponding to the key holding the reference
 *
 * When a file extent is allocated the fields are filled in:
 *     (root_key.objectid, trans->transid, inode objectid, offset in file)
 *
 * When a leaf is cow'd new references are added for every file extent found
 * in the leaf.  It looks the same as the create case, but trans->transid
 * will be different when the block is cow'd.
 *
 *     (root_key.objectid, trans->transid, inode objectid, offset in file)
 *
 * When a file extent is removed either during snapshot deletion or file
 * truncation, the corresponding back reference is found
 * by searching for:
 *
 *     (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
 *      inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 * - Different generations of the same subvolume
 *
 * Storing sufficient information for a full reverse mapping of a btree
 * block would require storing the lowest key of the block in the backref,
 * and it would require updating that lowest key either before write out or
 * every time it changed.  Instead, the objectid of the lowest key is stored
 * along with the level of the tree block.  This provides a hint
 * about where in the btree the block can be found.  Searches through the
 * btree only need to look for a pointer to that block, so they stop one
 * level higher than the level recorded in the backref.
 *
 * Some btrees do not do reference counting on their extents.  These
 * include the extent tree and the tree of tree roots.  Backrefs for these
 * trees always have a generation of zero.
 *
 * When a tree block is created, back references are inserted:
 *
 * (root->root_key.objectid, trans->transid or zero, level, lowest_key_objectid)
 *
 * When a tree block is cow'd in a reference counted root,
 * new back references are added for all the blocks it points to.
 * These are of the form (trans->transid will have increased since creation):
 *
 * (root->root_key.objectid, trans->transid, level, lowest_key_objectid)
 *
 * Because the lowest_key_objectid and the level are just hints
 * they are not used when backrefs are deleted.  When a backref is deleted:
 *
 * if backref was for a tree root:
 *     root_objectid = root->root_key.objectid
 * else
 *     root_objectid = btrfs_header_owner(parent)
 *
 * (root_objectid, btrfs_header_generation(parent) or zero, 0, 0)
 *
 * Back Reference Key hashing:
 *
 * Back references have four fields, each 64 bits long.  Unfortunately,
 * this is hashed into a single 64 bit number and placed into the key offset.
 * The key objectid corresponds to the first byte in the extent, and the
 * key type is set to BTRFS_EXTENT_REF_KEY
 */
int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path, u64 bytenr,
				u64 root_objectid, u64 ref_generation,
				u64 owner, u64 owner_offset)
{
	u64 hash;
	struct btrfs_key key;
	struct btrfs_extent_ref ref;
	struct btrfs_extent_ref *disk_ref;
	int ret;

	btrfs_set_stack_ref_root(&ref, root_objectid);
	btrfs_set_stack_ref_generation(&ref, ref_generation);
	btrfs_set_stack_ref_objectid(&ref, owner);
	btrfs_set_stack_ref_offset(&ref, owner_offset);

	hash = hash_extent_ref(root_objectid, ref_generation, owner,
			       owner_offset);
	key.offset = hash;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(ref));
	while (ret == -EEXIST) {
		disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					  struct btrfs_extent_ref);
		if (match_extent_ref(path->nodes[0], disk_ref, &ref))
			goto out;
		key.offset++;
		btrfs_release_path(root, path);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      sizeof(ref));
	}
	if (ret)
		goto out;
	disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				  struct btrfs_extent_ref);
	write_extent_buffer(path->nodes[0], &ref, (unsigned long)disk_ref,
			    sizeof(ref));
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_release_path(root, path);
	return ret;
}

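/*
 * increment the reference count on an extent item and insert the
 * matching backref; the caller must already hold alloc_mutex
 */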
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 root_objectid, u64 ref_generation,
				  u64 owner, u64 owner_offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_extent_item *item;
	u32 refs;

	WARN_ON(num_bytes < root->sectorsize);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	key.objectid = bytenr;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	key.offset = num_bytes;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
				0, 1);
	if (ret < 0)
		return ret;
	if (ret != 0) {
		BUG();
	}
	BUG_ON(ret != 0);
	l = path->nodes[0];
	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(l, item);
	btrfs_set_extent_refs(l, item, refs + 1);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(root->fs_info->extent_root, path);

	path->reada = 1;
	ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root,
					  path, bytenr, root_objectid,
					  ref_generation, owner, owner_offset);
	BUG_ON(ret);
	finish_current_insert(trans, root->fs_info->extent_root);
	del_pending_extents(trans, root->fs_info->extent_root);

	btrfs_free_path(path);
	return 0;
}

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes,
			 u64 root_objectid, u64 ref_generation,
			 u64 owner, u64 owner_offset)
{
	int ret;

	mutex_lock(&root->fs_info->alloc_mutex);
	ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
				     root_objectid, ref_generation,
				     owner, owner_offset);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return ret;
}

int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root)
{
	finish_current_insert(trans, root->fs_info->extent_root);
	del_pending_extents(trans, root->fs_info->extent_root);
	return 0;
}

static int lookup_extent_ref(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u32 *refs)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_extent_item *item;

	WARN_ON(num_bytes < root->sectorsize);
	path = btrfs_alloc_path();
	path->reada = 1;
	key.objectid = bytenr;
	key.offset = num_bytes;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
				0, 0);
	if (ret < 0)
		goto out;
	if (ret != 0) {
		btrfs_print_leaf(root, path->nodes[0]);
		printk("failed to find block number %Lu\n", bytenr);
		BUG();
	}
	l = path->nodes[0];
	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
	*refs = btrfs_extent_refs(l, item);
out:
	btrfs_free_path(path);
	return 0;
}

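/*
 * count the references this root holds on bytenr and remember the
 * smallest generation seen; *ref_count jumps to 2 as soon as a ref
 * from another root (or with an unexpected generation or objectid)
 * turns up
 */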
static int get_reference_status(struct btrfs_root *root, u64 bytenr,
				u64 parent_gen, u64 ref_objectid,
				u64 *min_generation, u32 *ref_count)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *ref_item;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 root_objectid = root->root_key.objectid;
	u64 ref_generation;
	u32 nritems;
	int ret;

	key.objectid = bytenr;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	mutex_lock(&root->fs_info->alloc_mutex);
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != bytenr ||
	    found_key.type != BTRFS_EXTENT_ITEM_KEY) {
		ret = 1;
		goto out;
	}

	*ref_count = 0;
	*min_generation = (u64)-1;

	while (1) {
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret == 0)
				continue;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != bytenr)
			break;

		if (found_key.type != BTRFS_EXTENT_REF_KEY) {
			path->slots[0]++;
			continue;
		}

		ref_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_extent_ref);
		ref_generation = btrfs_ref_generation(leaf, ref_item);
		/*
		 * For (parent_gen > 0 && parent_gen > ref_gen):
		 *
		 * we reach here through the oldest root, therefore
		 * all other references from the same snapshot should have
		 * a larger generation.
		 */
		if ((root_objectid != btrfs_ref_root(leaf, ref_item)) ||
		    (parent_gen > 0 && parent_gen > ref_generation) ||
		    (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
		     ref_objectid != btrfs_ref_objectid(leaf, ref_item))) {
			if (ref_count)
				*ref_count = 2;
			break;
		}

		*ref_count = 1;
		if (*min_generation > ref_generation)
			*min_generation = ref_generation;

		path->slots[0]++;
	}
	ret = 0;
out:
	mutex_unlock(&root->fs_info->alloc_mutex);
	btrfs_free_path(path);
	return ret;
}

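/*
 * check whether the given file extent is referenced by more than the
 * current tree; a 0 return means this root holds the only reference,
 * which is what allows a nodatacow write to rewrite the extent in place
 */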
int btrfs_cross_ref_exists(struct btrfs_root *root,
			   struct btrfs_key *key, u64 bytenr)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *old_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	struct btrfs_file_extent_item *item;
	u64 ref_generation;
	u64 min_generation;
	u64 extent_start;
	u32 ref_count;
	int level;
	int ret;

	BUG_ON(key->type != BTRFS_EXTENT_DATA_KEY);
	ret = get_reference_status(root, bytenr, 0, key->objectid,
				   &min_generation, &ref_count);
	if (ret)
		return ret;

	if (ref_count != 1)
		return 1;

	trans = btrfs_start_transaction(root, 0);
	old_root = root->dirty_root->root;
	ref_generation = old_root->root_key.offset;

	/* all references are created in running transaction */
	if (min_generation > ref_generation) {
		ret = 0;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	path->skip_locking = 1;
	/* if no item found, the extent is referenced by other snapshot */
	ret = btrfs_search_slot(NULL, old_root, key, path, 0, 0);
	if (ret)
		goto out;

	eb = path->nodes[0];
	item = btrfs_item_ptr(eb, path->slots[0],
			      struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(eb, item) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(eb, item) != bytenr) {
		ret = 1;
		goto out;
	}

	for (level = BTRFS_MAX_LEVEL - 1; level >= -1; level--) {
		if (level >= 0) {
			eb = path->nodes[level];
			if (!eb)
				continue;
			extent_start = eb->start;
		} else
			extent_start = bytenr;

		ret = get_reference_status(root, extent_start, ref_generation,
					   0, &min_generation, &ref_count);
		if (ret)
			goto out;

		if (ref_count != 1) {
			ret = 1;
			goto out;
		}
		if (level >= 0)
			ref_generation = btrfs_header_generation(eb);
	}
	ret = 0;
out:
	if (path)
		btrfs_free_path(path);
	btrfs_end_transaction(trans, root);
	return ret;
}

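/*
 * add a reference for everything buf points to: file extents when buf
 * is a leaf, child blocks when it is a node; optionally cache the
 * leaf's file extent references in the ref-cache
 */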
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int cache_ref)
{
	u64 bytenr;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret;
	int faili;
	int nr_file_extents = 0;

	if (!root->ref_cows)
		return 0;

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);
	for (i = 0; i < nritems; i++) {
		cond_resched();
		if (level == 0) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;

			if (buf != root->commit_root)
				nr_file_extents++;

			mutex_lock(&root->fs_info->alloc_mutex);
			ret = __btrfs_inc_extent_ref(trans, root, disk_bytenr,
				    btrfs_file_extent_disk_num_bytes(buf, fi),
				    root->root_key.objectid, trans->transid,
				    key.objectid, key.offset);
			mutex_unlock(&root->fs_info->alloc_mutex);
			if (ret) {
				faili = i;
				WARN_ON(1);
				goto fail;
			}
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			btrfs_node_key_to_cpu(buf, &key, i);

			mutex_lock(&root->fs_info->alloc_mutex);
			ret = __btrfs_inc_extent_ref(trans, root, bytenr,
					   btrfs_level_size(root, level - 1),
					   root->root_key.objectid,
					   trans->transid,
					   level - 1, key.objectid);
			mutex_unlock(&root->fs_info->alloc_mutex);
			if (ret) {
				faili = i;
				WARN_ON(1);
				goto fail;
			}
		}
	}
	/* cache original leaf block's references */
	if (level == 0 && cache_ref && buf != root->commit_root) {
		struct btrfs_leaf_ref *ref;
		struct btrfs_extent_info *info;

		ref = btrfs_alloc_leaf_ref(nr_file_extents);
		if (!ref) {
			WARN_ON(1);
			goto out;
		}

		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);
		ref->nritems = nr_file_extents;
		info = ref->extents;

		for (i = 0; nr_file_extents > 0 && i < nritems; i++) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;

			info->bytenr = disk_bytenr;
			info->num_bytes =
				btrfs_file_extent_disk_num_bytes(buf, fi);
			info->objectid = key.objectid;
			info->offset = key.offset;
			info++;
		}

		BUG_ON(!root->ref_tree);
		ret = btrfs_add_leaf_ref(root, ref);
		WARN_ON(ret);
		btrfs_free_leaf_ref(ref);
	}
out:
	return 0;
fail:
	WARN_ON(1);
#if 0
	for (i = 0; i < faili; i++) {
		if (level == 0) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;
			err = btrfs_free_extent(trans, root, disk_bytenr,
				    btrfs_file_extent_disk_num_bytes(buf,
								     fi), 0);
			BUG_ON(err);
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			err = btrfs_free_extent(trans, root, bytenr,
					btrfs_level_size(root, level - 1), 0);
			BUG_ON(err);
		}
	}
#endif
	return ret;
}

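/* write a single dirty block group item back into the extent tree */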
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	int pending_ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(extent_root, path);
fail:
	finish_current_insert(trans, extent_root);
	pending_ret = del_pending_extents(trans, extent_root);
	if (ret)
		return ret;
	if (pending_ret)
		return pending_ret;
	return 0;
}

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *cache;
	int ret;
	int err = 0;
	int werr = 0;
	struct btrfs_path *path;
	u64 last = 0;
	u64 start;
	u64 end;
	u64 ptr;

	block_group_cache = &root->fs_info->block_group_cache;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&root->fs_info->alloc_mutex);
	while(1) {
		ret = find_first_extent_bit(block_group_cache, last,
					    &start, &end, BLOCK_GROUP_DIRTY);
		if (ret)
			break;

		last = end + 1;
		ret = get_state_private(block_group_cache, start, &ptr);
		if (ret)
			break;
		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
		err = write_one_cache_group(trans, root,
					    path, cache);
		/*
		 * if we fail to write the cache group, we want
		 * to keep it marked dirty in hopes that a later
		 * write will work
		 */
		if (err) {
			werr = err;
			continue;
		}
		clear_extent_bits(block_group_cache, start, end,
				  BLOCK_GROUP_DIRTY, GFP_NOFS);
	}
	btrfs_free_path(path);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return werr;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct list_head *cur;
	struct btrfs_space_info *found;
	list_for_each(cur, head) {
		found = list_entry(cur, struct btrfs_space_info, list);
		if (found->flags == flags)
			return found;
	}
	return NULL;
}

static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;

	found = __find_space_info(info, flags);
	if (found) {
		found->total_bytes += total_bytes;
		found->bytes_used += bytes_used;
		found->full = 0;
		WARN_ON(found->total_bytes < found->bytes_used);
		*space_info = found;
		return 0;
	}
	found = kmalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	list_add(&found->list, &info->space_info);
	found->flags = flags;
	found->total_bytes = total_bytes;
	found->bytes_used = bytes_used;
	found->bytes_pinned = 0;
	found->full = 0;
	found->force_alloc = 0;
	*space_info = found;
	return 0;
}

static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_DUP);
	if (extra_flags) {
		if (flags & BTRFS_BLOCK_GROUP_DATA)
			fs_info->avail_data_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_METADATA)
			fs_info->avail_metadata_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			fs_info->avail_system_alloc_bits |= extra_flags;
	}
}

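/*
 * mask out the allocation profile bits that the current number of
 * devices cannot honor and collapse redundant raid flags to a single
 * profile
 */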
static u64 reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	u64 num_devices = root->fs_info->fs_devices->num_devices;

	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
		      BTRFS_BLOCK_GROUP_RAID10))) {
		flags &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
	     (flags & BTRFS_BLOCK_GROUP_DUP)))
		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
	return flags;
}

static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	u64 thresh;
	u64 start;
	u64 num_bytes;
	int ret;

	flags = reduce_alloc_profile(extent_root, flags);

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret);
	}
	BUG_ON(!space_info);

	if (space_info->force_alloc) {
		force = 1;
		space_info->force_alloc = 0;
	}
	if (space_info->full)
		goto out;

	thresh = div_factor(space_info->total_bytes, 6);
	if (!force &&
	   (space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) <
	    thresh)
		goto out;

	mutex_lock(&extent_root->fs_info->chunk_mutex);
	ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
	if (ret == -ENOSPC) {
		printk("space info full %Lu\n", flags);
		space_info->full = 1;
		goto out_unlock;
	}
	BUG_ON(ret);

	ret = btrfs_make_block_group(trans, extent_root, 0, flags,
		     BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
	BUG_ON(ret);
out_unlock:
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
out:
	return 0;
}

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	u64 start;
	u64 end;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	while(total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache) {
			return -1;
		}
		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);
		start = cache->key.objectid;
		end = start + cache->key.offset - 1;
		set_extent_bits(&info->block_group_cache, start, end,
				BLOCK_GROUP_DIRTY, GFP_NOFS);

		spin_lock(&cache->lock);
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			cache->space_info->bytes_used += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			spin_unlock(&cache->lock);
		} else {
			old_val -= num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			spin_unlock(&cache->lock);
			if (mark_free) {
				set_extent_dirty(&info->free_space_cache,
						 bytenr, bytenr + num_bytes - 1,
						 GFP_NOFS);
			}
		}
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}

static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	u64 start;
	u64 end;
	int ret;

	ret = find_first_extent_bit(&root->fs_info->block_group_cache,
				    search_start, &start, &end,
				    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
				    BLOCK_GROUP_SYSTEM);
	if (ret)
		return 0;
	return start;
}

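/*
 * pin or unpin the range [bytenr, bytenr + num) and keep the per block
 * group and fs-wide pinned counters in sync with the pinned_extents tree
 */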
static int update_pinned_extents(struct btrfs_root *root,
				 u64 bytenr, u64 num, int pin)
{
	u64 len;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *fs_info = root->fs_info;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	if (pin) {
		set_extent_dirty(&fs_info->pinned_extents,
				 bytenr, bytenr + num - 1, GFP_NOFS);
	} else {
		clear_extent_dirty(&fs_info->pinned_extents,
				   bytenr, bytenr + num - 1, GFP_NOFS);
	}
	while (num > 0) {
		cache = btrfs_lookup_block_group(fs_info, bytenr);
		if (!cache) {
			u64 first = first_logical_byte(root, bytenr);
			WARN_ON(first < bytenr);
			len = min(first - bytenr, num);
		} else {
			len = min(num, cache->key.offset -
				  (bytenr - cache->key.objectid));
		}
		if (pin) {
			if (cache) {
				spin_lock(&cache->lock);
				cache->pinned += len;
				cache->space_info->bytes_pinned += len;
				spin_unlock(&cache->lock);
			}
			fs_info->total_pinned += len;
		} else {
			if (cache) {
				spin_lock(&cache->lock);
				cache->pinned -= len;
				cache->space_info->bytes_pinned -= len;
				spin_unlock(&cache->lock);
			}
			fs_info->total_pinned -= len;
		}
		bytenr += len;
		num -= len;
	}
	return 0;
}

int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
{
	u64 last = 0;
	u64 start;
	u64 end;
	struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
	int ret;

	while(1) {
		ret = find_first_extent_bit(pinned_extents, last,
					    &start, &end, EXTENT_DIRTY);
		if (ret)
			break;
		set_extent_dirty(copy, start, end, GFP_NOFS);
		last = end + 1;
	}
	return 0;
}

int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_io_tree *unpin)
{
	u64 start;
	u64 end;
	int ret;
	struct extent_io_tree *free_space_cache;
	free_space_cache = &root->fs_info->free_space_cache;

	mutex_lock(&root->fs_info->alloc_mutex);
	while(1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;
		update_pinned_extents(root, start, end + 1 - start, 0);
		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
		if (need_resched()) {
			mutex_unlock(&root->fs_info->alloc_mutex);
			cond_resched();
			mutex_lock(&root->fs_info->alloc_mutex);
		}
	}
	mutex_unlock(&root->fs_info->alloc_mutex);
	return 0;
}

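/*
 * insert extent items and backrefs for the tree blocks recorded in the
 * extent_ins tree; the caller must hold alloc_mutex
 */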
static int finish_current_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *extent_root)
{
	u64 start;
	u64 end;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct extent_buffer *eb;
	struct btrfs_path *path;
	struct btrfs_key ins;
	struct btrfs_disk_key first;
	struct btrfs_extent_item extent_item;
	int ret;
	int level;
	int err = 0;

	WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
	btrfs_set_stack_extent_refs(&extent_item, 1);
	btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
	path = btrfs_alloc_path();

	while(1) {
		ret = find_first_extent_bit(&info->extent_ins, 0, &start,
					    &end, EXTENT_LOCKED);
		if (ret)
			break;

		ins.objectid = start;
		ins.offset = end + 1 - start;
		err = btrfs_insert_item(trans, extent_root, &ins,
					&extent_item, sizeof(extent_item));
		clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,
				  GFP_NOFS);

		eb = btrfs_find_tree_block(extent_root, ins.objectid,
					   ins.offset);

		if (!btrfs_buffer_uptodate(eb, trans->transid)) {
			mutex_unlock(&extent_root->fs_info->alloc_mutex);
			btrfs_read_buffer(eb, trans->transid);
			mutex_lock(&extent_root->fs_info->alloc_mutex);
		}

		btrfs_tree_lock(eb);
		level = btrfs_header_level(eb);
		if (level == 0) {
			btrfs_item_key(eb, &first, 0);
		} else {
			btrfs_node_key(eb, &first, 0);
		}
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		/*
		 * the first key is just a hint, so the race we've created
		 * against reading it is fine
		 */
		err = btrfs_insert_extent_backref(trans, extent_root, path,
					  start, extent_root->root_key.objectid,
					  0, level,
					  btrfs_disk_key_objectid(&first));
		BUG_ON(err);
		if (need_resched()) {
			mutex_unlock(&extent_root->fs_info->alloc_mutex);
			cond_resched();
			mutex_lock(&extent_root->fs_info->alloc_mutex);
		}
	}
	btrfs_free_path(path);
	return 0;
}

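/*
 * make sure an extent is not reused before the transaction commits; a
 * tree block that was allocated and never written in this transaction
 * can be cleaned and reused immediately instead (return value 1)
 */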
static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
			  int pending)
{
	int err = 0;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	if (!pending) {
		struct extent_buffer *buf;
		buf = btrfs_find_tree_block(root, bytenr, num_bytes);
		if (buf) {
			if (btrfs_buffer_uptodate(buf, 0) &&
			    btrfs_try_tree_lock(buf)) {
				u64 transid =
				    root->fs_info->running_transaction->transid;
				u64 header_transid =
					btrfs_header_generation(buf);
				if (header_transid == transid &&
				    !btrfs_header_flag(buf,
					       BTRFS_HEADER_FLAG_WRITTEN)) {
					clean_tree_block(NULL, root, buf);
					btrfs_tree_unlock(buf);
					free_extent_buffer(buf);
					return 1;
				}
				btrfs_tree_unlock(buf);
			}
			free_extent_buffer(buf);
		}
		update_pinned_extents(root, bytenr, num_bytes, 1);
	} else {
		set_extent_bits(&root->fs_info->pending_del,
				bytenr, bytenr + num_bytes - 1,
				EXTENT_LOCKED, GFP_NOFS);
	}
	BUG_ON(err < 0);
	return 0;
}

/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
			 *root, u64 bytenr, u64 num_bytes,
			 u64 root_objectid, u64 ref_generation,
			 u64 owner_objectid, u64 owner_offset, int pin,
			 int mark_free)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	int ret;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	struct btrfs_extent_item *ei;
	u32 refs;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	key.objectid = bytenr;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	key.offset = num_bytes;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	ret = lookup_extent_backref(trans, extent_root, path,
				    bytenr, root_objectid,
				    ref_generation,
				    owner_objectid, owner_offset, 1);
	if (ret == 0) {
		struct btrfs_key found_key;
		extent_slot = path->slots[0];
		while(extent_slot > 0) {
			extent_slot--;
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      extent_slot);
			if (found_key.objectid != bytenr)
				break;
			if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
			    found_key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
		}
		if (!found_extent)
			ret = btrfs_del_item(trans, extent_root, path);
	} else {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
		printk("Unable to find ref byte nr %Lu root %Lu "
		       " gen %Lu owner %Lu offset %Lu\n", bytenr,
		       root_objectid, ref_generation, owner_objectid,
		       owner_offset);
	}
	if (!found_extent) {
		btrfs_release_path(extent_root, path);
		ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
		if (ret < 0)
			return ret;
		BUG_ON(ret);
		extent_slot = path->slots[0];
	}

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs == 0);
	refs -= 1;
	btrfs_set_extent_refs(leaf, ei, refs);

	btrfs_mark_buffer_dirty(leaf);

	if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
		/* if the back ref and the extent are next to each other
		 * they get deleted below in one shot
		 */
		path->slots[0] = extent_slot;
		num_to_del = 2;
	} else if (found_extent) {
		/* otherwise delete the extent back ref */
		ret = btrfs_del_item(trans, extent_root, path);
		BUG_ON(ret);
		/* if refs are 0, we need to setup the path for deletion */
		if (refs == 0) {
			btrfs_release_path(extent_root, path);
			ret = btrfs_search_slot(trans, extent_root, &key, path,
						-1, 1);
			if (ret < 0)
				return ret;
			BUG_ON(ret);
		}
	}

	if (refs == 0) {
		u64 super_used;
		u64 root_used;

		if (pin) {
			ret = pin_down_bytes(root, bytenr, num_bytes, 0);
			if (ret > 0)
				mark_free = 1;
			BUG_ON(ret < 0);
		}

		/* block accounting for super block */
		spin_lock_irq(&info->delalloc_lock);
		super_used = btrfs_super_bytes_used(&info->super_copy);
		btrfs_set_super_bytes_used(&info->super_copy,
					   super_used - num_bytes);
		spin_unlock_irq(&info->delalloc_lock);

		/* block accounting for root item */
		root_used = btrfs_root_used(&root->root_item);
		btrfs_set_root_used(&root->root_item,
				    root_used - num_bytes);
		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		if (ret) {
			return ret;
		}
		ret = update_block_group(trans, root, bytenr, num_bytes, 0,
					 mark_free);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	finish_current_insert(trans, extent_root);
	return ret;
}

/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 */
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root)
{
	int ret;
	int err = 0;
	u64 start;
	u64 end;
	struct extent_io_tree *pending_del;
	struct extent_io_tree *pinned_extents;

	WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
	pending_del = &extent_root->fs_info->pending_del;
	pinned_extents = &extent_root->fs_info->pinned_extents;

	while(1) {
		ret = find_first_extent_bit(pending_del, 0, &start, &end,
					    EXTENT_LOCKED);
		if (ret)
			break;
		clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
				  GFP_NOFS);
		if (!test_range_bit(&extent_root->fs_info->extent_ins,
				    start, end, EXTENT_LOCKED, 0)) {
			update_pinned_extents(extent_root, start,
					      end + 1 - start, 1);
			ret = __free_extent(trans, extent_root,
					    start, end + 1 - start,
					    extent_root->root_key.objectid,
					    0, 0, 0, 0, 0);
		} else {
			clear_extent_bits(&extent_root->fs_info->extent_ins,
					  start, end, EXTENT_LOCKED, GFP_NOFS);
		}
		if (ret)
			err = ret;

		if (need_resched()) {
			mutex_unlock(&extent_root->fs_info->alloc_mutex);
			cond_resched();
			mutex_lock(&extent_root->fs_info->alloc_mutex);
		}
	}
	return err;
}

/*
 * remove an extent from the root, returns 0 on success
 */
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, u64 bytenr,
			       u64 num_bytes, u64 root_objectid,
			       u64 ref_generation, u64 owner_objectid,
			       u64 owner_offset, int pin)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	int pending_ret;
	int ret;

	WARN_ON(num_bytes < root->sectorsize);
	if (!root->ref_cows)
		ref_generation = 0;

	if (root == extent_root) {
		pin_down_bytes(root, bytenr, num_bytes, 1);
		return 0;
	}
	ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid,
			    ref_generation, owner_objectid, owner_offset,
			    pin, pin == 0);

	finish_current_insert(trans, root->fs_info->extent_root);
	pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
	return ret ? ret : pending_ret;
}

int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, u64 bytenr,
		      u64 num_bytes, u64 root_objectid,
		      u64 ref_generation, u64 owner_objectid,
		      u64 owner_offset, int pin)
{
	int ret;

	maybe_lock_mutex(root);
	ret = __btrfs_free_extent(trans, root, bytenr, num_bytes,
				  root_objectid, ref_generation,
				  owner_objectid, owner_offset, pin);
	maybe_unlock_mutex(root);
	return ret;
}

static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}

/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
1863 static int noinline find_free_extent(struct btrfs_trans_handle *trans,
1864 struct btrfs_root *orig_root,
1865 u64 num_bytes, u64 empty_size,
1866 u64 search_start, u64 search_end,
1867 u64 hint_byte, struct btrfs_key *ins,
1868 u64 exclude_start, u64 exclude_nr,
1869 int data)
1871 int ret;
1872 u64 orig_search_start;
1873 struct btrfs_root * root = orig_root->fs_info->extent_root;
1874 struct btrfs_fs_info *info = root->fs_info;
1875 u64 total_needed = num_bytes;
1876 u64 *last_ptr = NULL;
1877 struct btrfs_block_group_cache *block_group;
1878 int full_scan = 0;
1879 int wrapped = 0;
1880 int chunk_alloc_done = 0;
1881 int empty_cluster = 2 * 1024 * 1024;
1882 int allowed_chunk_alloc = 0;
1884 WARN_ON(num_bytes < root->sectorsize);
1885 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
1887 if (orig_root->ref_cows || empty_size)
1888 allowed_chunk_alloc = 1;
1890 if (data & BTRFS_BLOCK_GROUP_METADATA) {
1891 last_ptr = &root->fs_info->last_alloc;
1892 empty_cluster = 256 * 1024;
1895 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
1896 last_ptr = &root->fs_info->last_data_alloc;
1899 if (last_ptr) {
1900 if (*last_ptr)
1901 hint_byte = *last_ptr;
1902 else {
1903 empty_size += empty_cluster;
1907 search_start = max(search_start, first_logical_byte(root, 0));
1908 orig_search_start = search_start;
1910 if (search_end == (u64)-1)
1911 search_end = btrfs_super_total_bytes(&info->super_copy);
1913 if (hint_byte) {
1914 block_group = btrfs_lookup_first_block_group(info, hint_byte);
1915 if (!block_group)
1916 hint_byte = search_start;
1917 block_group = btrfs_find_block_group(root, block_group,
1918 hint_byte, data, 1);
1919 if (last_ptr && *last_ptr == 0 && block_group)
1920 hint_byte = block_group->key.objectid;
1921 } else {
1922 block_group = btrfs_find_block_group(root,
1923 trans->block_group,
1924 search_start, data, 1);
1926 search_start = max(search_start, hint_byte);
1928 total_needed += empty_size;
1930 check_failed:
1931 if (!block_group) {
1932 block_group = btrfs_lookup_first_block_group(info,
1933 search_start);
1934 if (!block_group)
1935 block_group = btrfs_lookup_first_block_group(info,
1936 orig_search_start);
1938 if (full_scan && !chunk_alloc_done) {
1939 if (allowed_chunk_alloc) {
1940 do_chunk_alloc(trans, root,
1941 num_bytes + 2 * 1024 * 1024, data, 1);
1942 allowed_chunk_alloc = 0;
1943 } else if (block_group && block_group_bits(block_group, data)) {
1944 block_group->space_info->force_alloc = 1;
1946 chunk_alloc_done = 1;
1948 ret = find_search_start(root, &block_group, &search_start,
1949 total_needed, data);
1950 if (ret == -ENOSPC && last_ptr && *last_ptr) {
1951 *last_ptr = 0;
1952 block_group = btrfs_lookup_first_block_group(info,
1953 orig_search_start);
1954 search_start = orig_search_start;
1955 ret = find_search_start(root, &block_group, &search_start,
1956 total_needed, data);
1958 if (ret == -ENOSPC)
1959 goto enospc;
1960 if (ret)
1961 goto error;
1963 if (last_ptr && *last_ptr && search_start != *last_ptr) {
1964 *last_ptr = 0;
1965 if (!empty_size) {
1966 empty_size += empty_cluster;
1967 total_needed += empty_size;
1969 block_group = btrfs_lookup_first_block_group(info,
1970 orig_search_start);
1971 search_start = orig_search_start;
1972 ret = find_search_start(root, &block_group,
1973 &search_start, total_needed, data);
1974 if (ret == -ENOSPC)
1975 goto enospc;
1976 if (ret)
1977 goto error;
1980 search_start = stripe_align(root, search_start);
1981 ins->objectid = search_start;
1982 ins->offset = num_bytes;
1984 if (ins->objectid + num_bytes >= search_end)
1985 goto enospc;
1987 if (ins->objectid + num_bytes >
1988 block_group->key.objectid + block_group->key.offset) {
1989 search_start = block_group->key.objectid +
1990 block_group->key.offset;
1991 goto new_group;
1994 if (test_range_bit(&info->extent_ins, ins->objectid,
1995 ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) {
1996 search_start = ins->objectid + num_bytes;
1997 goto new_group;
2000 if (test_range_bit(&info->pinned_extents, ins->objectid,
2001 ins->objectid + num_bytes -1, EXTENT_DIRTY, 0)) {
2002 search_start = ins->objectid + num_bytes;
2003 goto new_group;
2006 if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
2007 ins->objectid < exclude_start + exclude_nr)) {
2008 search_start = exclude_start + exclude_nr;
2009 goto new_group;
2012 if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
2013 block_group = btrfs_lookup_block_group(info, ins->objectid);
2014 if (block_group)
2015 trans->block_group = block_group;
2017 ins->offset = num_bytes;
2018 if (last_ptr) {
2019 *last_ptr = ins->objectid + ins->offset;
2020 if (*last_ptr ==
2021 btrfs_super_total_bytes(&root->fs_info->super_copy)) {
2022 *last_ptr = 0;
2025 return 0;
2027 new_group:
2028 if (search_start + num_bytes >= search_end) {
2029 enospc:
2030 search_start = orig_search_start;
2031 if (full_scan) {
2032 ret = -ENOSPC;
2033 goto error;
2035 if (wrapped) {
2036 if (!full_scan)
2037 total_needed -= empty_size;
2038 full_scan = 1;
2039 } else
2040 wrapped = 1;
2042 block_group = btrfs_lookup_first_block_group(info, search_start);
2043 cond_resched();
2044 block_group = btrfs_find_block_group(root, block_group,
2045 search_start, data, 0);
2046 goto check_failed;
2048 error:
2049 return ret;
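/*
 * A minimal sketch of the round-up that stripe_align() performs on
 * search_start above.  stripe_len is assumed to be a power of two
 * here; the kernel derives the real value from the chunk layout.
 */
static inline u64 align_to_stripe(u64 start, u64 stripe_len)
{
	u64 mask = stripe_len - 1;

	/* round up to the next stripe boundary */
	return (start + mask) & ~mask;
}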
2052 static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2053 struct btrfs_root *root,
2054 u64 num_bytes, u64 min_alloc_size,
2055 u64 empty_size, u64 hint_byte,
2056 u64 search_end, struct btrfs_key *ins,
2057 u64 data)
2059 int ret;
2060 u64 search_start = 0;
2061 u64 alloc_profile;
2062 struct btrfs_fs_info *info = root->fs_info;
2064 if (data) {
2065 alloc_profile = info->avail_data_alloc_bits &
2066 info->data_alloc_profile;
2067 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2068 } else if (root == root->fs_info->chunk_root) {
2069 alloc_profile = info->avail_system_alloc_bits &
2070 info->system_alloc_profile;
2071 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2072 } else {
2073 alloc_profile = info->avail_metadata_alloc_bits &
2074 info->metadata_alloc_profile;
2075 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2077 again:
2078 data = reduce_alloc_profile(root, data);
2079 /*
2080 * the only place that sets empty_size is btrfs_realloc_node, which
2081 * is not called recursively on allocations
2082 */
2083 if (empty_size || root->ref_cows) {
2084 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
2085 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2086 2 * 1024 * 1024,
2087 BTRFS_BLOCK_GROUP_METADATA |
2088 (info->metadata_alloc_profile &
2089 info->avail_metadata_alloc_bits), 0);
2090 BUG_ON(ret);
2092 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2093 num_bytes + 2 * 1024 * 1024, data, 0);
2094 BUG_ON(ret);
2097 WARN_ON(num_bytes < root->sectorsize);
2098 ret = find_free_extent(trans, root, num_bytes, empty_size,
2099 search_start, search_end, hint_byte, ins,
2100 trans->alloc_exclude_start,
2101 trans->alloc_exclude_nr, data);
2103 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
2104 num_bytes = num_bytes >> 1;
2105 num_bytes = max(num_bytes, min_alloc_size);
2106 do_chunk_alloc(trans, root->fs_info->extent_root,
2107 num_bytes, data, 1);
2108 goto again;
2110 if (ret) {
2111 printk("allocation failed flags %llu\n", (unsigned long long)data);
2112 BUG();
2114 clear_extent_dirty(&root->fs_info->free_space_cache,
2115 ins->objectid, ins->objectid + ins->offset - 1,
2116 GFP_NOFS);
2117 return 0;
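/*
 * A schematic of the -ENOSPC back-off above: keep halving the request
 * until it hits min_alloc_size before giving up for good.  try_alloc()
 * is a hypothetical stand-in for the find_free_extent() call.
 */
static int alloc_with_backoff(u64 num_bytes, u64 min_alloc_size,
			      int (*try_alloc)(u64))
{
	int ret;

	for (;;) {
		ret = try_alloc(num_bytes);
		if (ret != -ENOSPC || num_bytes <= min_alloc_size)
			return ret;
		/* halve the request, but never drop below the floor */
		num_bytes = max(num_bytes >> 1, min_alloc_size);
	}
}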
2120 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2121 struct btrfs_root *root,
2122 u64 num_bytes, u64 min_alloc_size,
2123 u64 empty_size, u64 hint_byte,
2124 u64 search_end, struct btrfs_key *ins,
2125 u64 data)
2127 int ret;
2128 maybe_lock_mutex(root);
2129 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
2130 empty_size, hint_byte, search_end, ins,
2131 data);
2132 maybe_unlock_mutex(root);
2133 return ret;
2136 static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2137 struct btrfs_root *root,
2138 u64 root_objectid, u64 ref_generation,
2139 u64 owner, u64 owner_offset,
2140 struct btrfs_key *ins)
2142 int ret;
2143 int pending_ret;
2144 u64 super_used;
2145 u64 root_used;
2146 u64 num_bytes = ins->offset;
2147 u32 sizes[2];
2148 struct btrfs_fs_info *info = root->fs_info;
2149 struct btrfs_root *extent_root = info->extent_root;
2150 struct btrfs_extent_item *extent_item;
2151 struct btrfs_extent_ref *ref;
2152 struct btrfs_path *path;
2153 struct btrfs_key keys[2];
2155 /* block accounting for super block */
2156 spin_lock_irq(&info->delalloc_lock);
2157 super_used = btrfs_super_bytes_used(&info->super_copy);
2158 btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
2159 spin_unlock_irq(&info->delalloc_lock);
2161 /* block accounting for root item */
2162 root_used = btrfs_root_used(&root->root_item);
2163 btrfs_set_root_used(&root->root_item, root_used + num_bytes);
2165 if (root == extent_root) {
2166 set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
2167 ins->objectid + ins->offset - 1,
2168 EXTENT_LOCKED, GFP_NOFS);
2169 goto update_block;
2172 memcpy(&keys[0], ins, sizeof(*ins));
2173 keys[1].offset = hash_extent_ref(root_objectid, ref_generation,
2174 owner, owner_offset);
2175 keys[1].objectid = ins->objectid;
2176 keys[1].type = BTRFS_EXTENT_REF_KEY;
2177 sizes[0] = sizeof(*extent_item);
2178 sizes[1] = sizeof(*ref);
2180 path = btrfs_alloc_path();
2181 BUG_ON(!path);
2183 ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
2184 sizes, 2);
2186 BUG_ON(ret);
2187 extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2188 struct btrfs_extent_item);
2189 btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
2190 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
2191 struct btrfs_extent_ref);
2193 btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
2194 btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
2195 btrfs_set_ref_objectid(path->nodes[0], ref, owner);
2196 btrfs_set_ref_offset(path->nodes[0], ref, owner_offset);
2198 btrfs_mark_buffer_dirty(path->nodes[0]);
2200 trans->alloc_exclude_start = 0;
2201 trans->alloc_exclude_nr = 0;
2202 btrfs_free_path(path);
2203 finish_current_insert(trans, extent_root);
2204 pending_ret = del_pending_extents(trans, extent_root);
2206 if (ret)
2207 goto out;
2208 if (pending_ret) {
2209 ret = pending_ret;
2210 goto out;
2213 update_block:
2214 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
2215 if (ret) {
2216 printk("update block group failed for %llu %llu\n",
2217 (unsigned long long)ins->objectid, (unsigned long long)ins->offset);
2218 BUG();
2220 out:
2221 return ret;
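/*
 * The back reference inserted above is keyed by hash_extent_ref(),
 * which folds (root, generation, owner, owner_offset) into the key
 * offset so refs sort pseudo-randomly under the extent.  A sketch of
 * that idea with an illustrative 64-bit mixer; the real helper builds
 * the value from crc32c sums, not from this constant.
 */
static u64 ref_key_offset_sketch(u64 root_objectid, u64 ref_generation,
				 u64 owner, u64 owner_offset)
{
	u64 h = root_objectid;

	h = h * 0x9e3779b97f4a7c15ULL + ref_generation;
	h = h * 0x9e3779b97f4a7c15ULL + owner;
	h = h * 0x9e3779b97f4a7c15ULL + owner_offset;
	return h;
}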
2224 int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2225 struct btrfs_root *root,
2226 u64 root_objectid, u64 ref_generation,
2227 u64 owner, u64 owner_offset,
2228 struct btrfs_key *ins)
2230 int ret;
2231 maybe_lock_mutex(root);
2232 ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
2233 ref_generation, owner,
2234 owner_offset, ins);
2235 maybe_unlock_mutex(root);
2236 return ret;
2238 /*
2239 * finds a free extent and does all the dirty work required for allocation
2240 * returns the key for the extent through ins, and a tree buffer for
2241 * the first block of the extent through buf.
2242 *
2243 * returns 0 if everything worked, non-zero otherwise.
2244 */
2245 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
2246 struct btrfs_root *root,
2247 u64 num_bytes, u64 min_alloc_size,
2248 u64 root_objectid, u64 ref_generation,
2249 u64 owner, u64 owner_offset,
2250 u64 empty_size, u64 hint_byte,
2251 u64 search_end, struct btrfs_key *ins, u64 data)
2253 int ret;
2255 maybe_lock_mutex(root);
2257 ret = __btrfs_reserve_extent(trans, root, num_bytes,
2258 min_alloc_size, empty_size, hint_byte,
2259 search_end, ins, data);
2260 BUG_ON(ret);
2261 ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
2262 ref_generation, owner,
2263 owner_offset, ins);
2264 BUG_ON(ret);
2266 maybe_unlock_mutex(root);
2267 return ret;
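/*
 * btrfs_alloc_extent() is simply the two phases above glued together:
 * reserve space, then record the extent item and its first back
 * reference.  A hypothetical caller that keeps the phases separate,
 * with the owner fields zeroed for brevity, might look like this:
 */
static int alloc_two_phase_sketch(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, u64 num_bytes,
				  struct btrfs_key *ins)
{
	int ret;

	ret = btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
				   0, 0, (u64)-1, ins, 1);
	if (ret)
		return ret;
	/* ... write into the reserved range here ... */
	return btrfs_alloc_reserved_extent(trans, root,
					   root->root_key.objectid,
					   trans->transid, 0, 0, ins);
}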
2269 /*
2270 * helper function to allocate a block for a given tree
2271 * returns the tree buffer or NULL.
2272 */
2273 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
2274 struct btrfs_root *root,
2275 u32 blocksize,
2276 u64 root_objectid,
2277 u64 ref_generation,
2278 u64 first_objectid,
2279 int level,
2280 u64 hint,
2281 u64 empty_size)
2283 struct btrfs_key ins;
2284 int ret;
2285 struct extent_buffer *buf;
2287 ret = btrfs_alloc_extent(trans, root, blocksize, blocksize,
2288 root_objectid, ref_generation,
2289 level, first_objectid, empty_size, hint,
2290 (u64)-1, &ins, 0);
2291 if (ret) {
2292 BUG_ON(ret > 0);
2293 return ERR_PTR(ret);
2295 buf = btrfs_find_create_tree_block(root, ins.objectid, blocksize);
2296 if (!buf) {
2297 btrfs_free_extent(trans, root, ins.objectid, blocksize,
2298 root->root_key.objectid, ref_generation,
2299 0, 0, 0);
2300 return ERR_PTR(-ENOMEM);
2302 btrfs_set_header_generation(buf, trans->transid);
2303 btrfs_tree_lock(buf);
2304 clean_tree_block(trans, root, buf);
2305 btrfs_set_buffer_uptodate(buf);
2307 if (PageDirty(buf->first_page)) {
2308 printk("page %lu dirty\n", buf->first_page->index);
2309 WARN_ON(1);
2312 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
2313 buf->start + buf->len - 1, GFP_NOFS);
2314 trans->blocks_used++;
2315 return buf;
2318 static int noinline drop_leaf_ref_no_cache(struct btrfs_trans_handle *trans,
2319 struct btrfs_root *root,
2320 struct extent_buffer *leaf)
2322 u64 leaf_owner;
2323 u64 leaf_generation;
2324 struct btrfs_key key;
2325 struct btrfs_file_extent_item *fi;
2326 int i;
2327 int nritems;
2328 int ret;
2330 BUG_ON(!btrfs_is_leaf(leaf));
2331 nritems = btrfs_header_nritems(leaf);
2332 leaf_owner = btrfs_header_owner(leaf);
2333 leaf_generation = btrfs_header_generation(leaf);
2335 mutex_unlock(&root->fs_info->alloc_mutex);
2337 for (i = 0; i < nritems; i++) {
2338 u64 disk_bytenr;
2339 cond_resched();
2341 btrfs_item_key_to_cpu(leaf, &key, i);
2342 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2343 continue;
2344 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
2345 if (btrfs_file_extent_type(leaf, fi) ==
2346 BTRFS_FILE_EXTENT_INLINE)
2347 continue;
2348 /*
2349 * FIXME make sure to insert a trans record that
2350 * repeats the snapshot del on crash
2351 */
2352 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
2353 if (disk_bytenr == 0)
2354 continue;
2356 mutex_lock(&root->fs_info->alloc_mutex);
2357 ret = __btrfs_free_extent(trans, root, disk_bytenr,
2358 btrfs_file_extent_disk_num_bytes(leaf, fi),
2359 leaf_owner, leaf_generation,
2360 key.objectid, key.offset, 0);
2361 mutex_unlock(&root->fs_info->alloc_mutex);
2362 BUG_ON(ret);
2365 mutex_lock(&root->fs_info->alloc_mutex);
2366 return 0;
2369 static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
2370 struct btrfs_root *root,
2371 struct btrfs_leaf_ref *ref)
2373 int i;
2374 int ret;
2375 struct btrfs_extent_info *info = ref->extents;
2377 mutex_unlock(&root->fs_info->alloc_mutex);
2378 for (i = 0; i < ref->nritems; i++) {
2379 mutex_lock(&root->fs_info->alloc_mutex);
2380 ret = __btrfs_free_extent(trans, root,
2381 info->bytenr, info->num_bytes,
2382 ref->owner, ref->generation,
2383 info->objectid, info->offset, 0);
2384 mutex_unlock(&root->fs_info->alloc_mutex);
2385 BUG_ON(ret);
2386 info++;
2388 mutex_lock(&root->fs_info->alloc_mutex);
2390 return 0;
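/*
 * Both leaf-dropping helpers above follow the same locking shape: give
 * up alloc_mutex for the long scan and re-take it only around each
 * __btrfs_free_extent() call.  A schematic of that dance with the work
 * stubbed out:
 */
static void free_items_lock_dance(struct mutex *lock, int nritems,
				  void (*free_one)(int))
{
	int i;

	mutex_unlock(lock);		/* caller holds it; drop for the scan */
	for (i = 0; i < nritems; i++) {
		mutex_lock(lock);	/* only the free itself needs the lock */
		free_one(i);
		mutex_unlock(lock);
	}
	mutex_lock(lock);		/* restore the caller's locking state */
}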
2393 static void noinline reada_walk_down(struct btrfs_root *root,
2394 struct extent_buffer *node,
2395 int slot)
2397 u64 bytenr;
2398 u64 last = 0;
2399 u32 nritems;
2400 u32 refs;
2401 u32 blocksize;
2402 int ret;
2403 int i;
2404 int level;
2405 int skipped = 0;
2407 nritems = btrfs_header_nritems(node);
2408 level = btrfs_header_level(node);
2409 if (level)
2410 return;
2412 for (i = slot; i < nritems && skipped < 32; i++) {
2413 bytenr = btrfs_node_blockptr(node, i);
2414 if (last && ((bytenr > last && bytenr - last > 32 * 1024) ||
2415 (last > bytenr && last - bytenr > 32 * 1024))) {
2416 skipped++;
2417 continue;
2419 blocksize = btrfs_level_size(root, level - 1);
2420 if (i != slot) {
2421 ret = lookup_extent_ref(NULL, root, bytenr,
2422 blocksize, &refs);
2423 BUG_ON(ret);
2424 if (refs != 1) {
2425 skipped++;
2426 continue;
2429 ret = readahead_tree_block(root, bytenr, blocksize,
2430 btrfs_node_ptr_generation(node, i));
2431 last = bytenr + blocksize;
2432 cond_resched();
2433 if (ret)
2434 break;
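/*
 * The readahead loop above skips any block that sits more than 32k
 * from the last one issued and gives up entirely after 32 skips.  The
 * distance test, pulled out as a helper for clarity:
 */
static inline int reada_too_far(u64 last, u64 bytenr)
{
	if (!last)
		return 0;	/* nothing issued yet: always read */
	return (bytenr > last && bytenr - last > 32 * 1024) ||
	       (last > bytenr && last - bytenr > 32 * 1024);
}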
2438 int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
2439 u32 *refs)
2441 int ret;
2442 mutex_unlock(&root->fs_info->alloc_mutex);
2443 ret = lookup_extent_ref(NULL, root, start, len, refs);
2444 cond_resched();
2445 mutex_lock(&root->fs_info->alloc_mutex);
2446 return ret;
2449 /*
2450 * helper function for drop_snapshot, this walks down the tree dropping ref
2451 * counts as it goes.
2452 */
2453 static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
2454 struct btrfs_root *root,
2455 struct btrfs_path *path, int *level)
2457 u64 root_owner;
2458 u64 root_gen;
2459 u64 bytenr;
2460 u64 ptr_gen;
2461 struct extent_buffer *next;
2462 struct extent_buffer *cur;
2463 struct extent_buffer *parent;
2464 struct btrfs_leaf_ref *ref;
2465 u32 blocksize;
2466 int ret;
2467 u32 refs;
2469 mutex_lock(&root->fs_info->alloc_mutex);
2471 WARN_ON(*level < 0);
2472 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2473 ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
2474 path->nodes[*level]->len, &refs);
2475 BUG_ON(ret);
2476 if (refs > 1)
2477 goto out;
2479 /*
2480 * walk down to the last node level and free all the leaves
2481 */
2482 while(*level >= 0) {
2483 WARN_ON(*level < 0);
2484 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2485 cur = path->nodes[*level];
2487 if (btrfs_header_level(cur) != *level)
2488 WARN_ON(1);
2490 if (path->slots[*level] >=
2491 btrfs_header_nritems(cur))
2492 break;
2493 if (*level == 0) {
2494 ret = drop_leaf_ref_no_cache(trans, root, cur);
2495 BUG_ON(ret);
2496 break;
2498 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2499 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2500 blocksize = btrfs_level_size(root, *level - 1);
2502 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
2503 BUG_ON(ret);
2504 if (refs != 1) {
2505 parent = path->nodes[*level];
2506 root_owner = btrfs_header_owner(parent);
2507 root_gen = btrfs_header_generation(parent);
2508 path->slots[*level]++;
2509 ret = __btrfs_free_extent(trans, root, bytenr,
2510 blocksize, root_owner,
2511 root_gen, 0, 0, 1);
2512 BUG_ON(ret);
2513 continue;
2516 if (*level == 1) {
2517 struct btrfs_key key;
2518 btrfs_node_key_to_cpu(cur, &key, path->slots[*level]);
2519 ref = btrfs_lookup_leaf_ref(root, bytenr);
2520 if (ref) {
2521 ret = drop_leaf_ref(trans, root, ref);
2522 BUG_ON(ret);
2523 btrfs_remove_leaf_ref(root, ref);
2524 btrfs_free_leaf_ref(ref);
2525 *level = 0;
2526 break;
2529 next = btrfs_find_tree_block(root, bytenr, blocksize);
2530 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
2531 free_extent_buffer(next);
2532 mutex_unlock(&root->fs_info->alloc_mutex);
2534 if (path->slots[*level] == 0)
2535 reada_walk_down(root, cur, path->slots[*level]);
2536 next = read_tree_block(root, bytenr, blocksize,
2537 ptr_gen);
2538 cond_resched();
2539 mutex_lock(&root->fs_info->alloc_mutex);
2541 /* we've dropped the lock, double check */
2542 ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
2543 &refs);
2544 BUG_ON(ret);
2545 if (refs != 1) {
2546 parent = path->nodes[*level];
2547 root_owner = btrfs_header_owner(parent);
2548 root_gen = btrfs_header_generation(parent);
2550 path->slots[*level]++;
2551 free_extent_buffer(next);
2552 ret = __btrfs_free_extent(trans, root, bytenr,
2553 blocksize,
2554 root_owner,
2555 root_gen, 0, 0, 1);
2556 BUG_ON(ret);
2557 continue;
2560 WARN_ON(*level <= 0);
2561 if (path->nodes[*level-1])
2562 free_extent_buffer(path->nodes[*level-1]);
2563 path->nodes[*level-1] = next;
2564 *level = btrfs_header_level(next);
2565 path->slots[*level] = 0;
2567 out:
2568 WARN_ON(*level < 0);
2569 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2571 if (path->nodes[*level] == root->node) {
2572 parent = path->nodes[*level];
2573 bytenr = path->nodes[*level]->start;
2574 } else {
2575 parent = path->nodes[*level + 1];
2576 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
2579 blocksize = btrfs_level_size(root, *level);
2580 root_owner = btrfs_header_owner(parent);
2581 root_gen = btrfs_header_generation(parent);
2583 ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
2584 root_owner, root_gen, 0, 0, 1);
2585 free_extent_buffer(path->nodes[*level]);
2586 path->nodes[*level] = NULL;
2587 *level += 1;
2588 BUG_ON(ret);
2589 mutex_unlock(&root->fs_info->alloc_mutex);
2590 cond_resched();
2591 return 0;
2594 /*
2595 * helper for dropping snapshots. This walks back up the tree in the path
2596 * to find the first node higher up where we haven't yet gone through
2597 * all the slots
2598 */
2599 static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
2600 struct btrfs_root *root,
2601 struct btrfs_path *path, int *level)
2603 u64 root_owner;
2604 u64 root_gen;
2605 struct btrfs_root_item *root_item = &root->root_item;
2606 int i;
2607 int slot;
2608 int ret;
2610 for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2611 slot = path->slots[i];
2612 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
2613 struct extent_buffer *node;
2614 struct btrfs_disk_key disk_key;
2615 node = path->nodes[i];
2616 path->slots[i]++;
2617 *level = i;
2618 WARN_ON(*level == 0);
2619 btrfs_node_key(node, &disk_key, path->slots[i]);
2620 memcpy(&root_item->drop_progress,
2621 &disk_key, sizeof(disk_key));
2622 root_item->drop_level = i;
2623 return 0;
2624 } else {
2625 if (path->nodes[*level] == root->node) {
2626 root_owner = root->root_key.objectid;
2627 root_gen =
2628 btrfs_header_generation(path->nodes[*level]);
2629 } else {
2630 struct extent_buffer *node;
2631 node = path->nodes[*level + 1];
2632 root_owner = btrfs_header_owner(node);
2633 root_gen = btrfs_header_generation(node);
2635 ret = btrfs_free_extent(trans, root,
2636 path->nodes[*level]->start,
2637 path->nodes[*level]->len,
2638 root_owner, root_gen, 0, 0, 1);
2639 BUG_ON(ret);
2640 free_extent_buffer(path->nodes[*level]);
2641 path->nodes[*level] = NULL;
2642 *level = i + 1;
2645 return 1;
2648 /*
2649 * drop the reference count on the tree rooted at 'snap'. This traverses
2650 * the tree freeing any blocks that have a ref count of zero after being
2651 * decremented.
2652 */
2653 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
2654 *root)
2656 int ret = 0;
2657 int wret;
2658 int level;
2659 struct btrfs_path *path;
2660 int i;
2661 int orig_level;
2662 struct btrfs_root_item *root_item = &root->root_item;
2664 WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
2665 path = btrfs_alloc_path();
2666 BUG_ON(!path);
2668 level = btrfs_header_level(root->node);
2669 orig_level = level;
2670 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2671 path->nodes[level] = root->node;
2672 extent_buffer_get(root->node);
2673 path->slots[level] = 0;
2674 } else {
2675 struct btrfs_key key;
2676 struct btrfs_disk_key found_key;
2677 struct extent_buffer *node;
2679 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2680 level = root_item->drop_level;
2681 path->lowest_level = level;
2682 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2683 if (wret < 0) {
2684 ret = wret;
2685 goto out;
2687 node = path->nodes[level];
2688 btrfs_node_key(node, &found_key, path->slots[level]);
2689 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
2690 sizeof(found_key)));
2691 /*
2692 * unlock our path, this is safe because only this
2693 * function is allowed to delete this snapshot
2694 */
2695 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
2696 if (path->nodes[i] && path->locks[i]) {
2697 path->locks[i] = 0;
2698 btrfs_tree_unlock(path->nodes[i]);
2702 while(1) {
2703 atomic_inc(&root->fs_info->throttle_gen);
2704 wret = walk_down_tree(trans, root, path, &level);
2705 if (wret > 0)
2706 break;
2707 if (wret < 0)
2708 ret = wret;
2710 wret = walk_up_tree(trans, root, path, &level);
2711 if (wret > 0)
2712 break;
2713 if (wret < 0)
2714 ret = wret;
2715 if (trans->transaction->in_commit) {
2716 ret = -EAGAIN;
2717 break;
2719 wake_up(&root->fs_info->transaction_throttle);
2721 for (i = 0; i <= orig_level; i++) {
2722 if (path->nodes[i]) {
2723 free_extent_buffer(path->nodes[i]);
2724 path->nodes[i] = NULL;
2727 out:
2728 btrfs_free_path(path);
2729 return ret;
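/*
 * The loop above is a classic iterative tree walk: descend and free
 * (walk_down_tree), climb to the next unvisited slot (walk_up_tree),
 * and stop when the climb reports the root is done.  A schematic
 * driver, stripped of the throttling and the in_commit -EAGAIN
 * bail-out, with both walkers as callbacks returning >0 when finished
 * and <0 on error:
 */
static int walk_tree_sketch(int (*down)(int *level), int (*up)(int *level))
{
	int level = 0;
	int ret = 0;
	int wret;

	while (1) {
		wret = down(&level);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;
		wret = up(&level);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;
	}
	return ret;
}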
2732 int btrfs_free_block_groups(struct btrfs_fs_info *info)
2734 u64 start;
2735 u64 end;
2736 u64 ptr;
2737 int ret;
2739 mutex_lock(&info->alloc_mutex);
2740 while(1) {
2741 ret = find_first_extent_bit(&info->block_group_cache, 0,
2742 &start, &end, (unsigned int)-1);
2743 if (ret)
2744 break;
2745 ret = get_state_private(&info->block_group_cache, start, &ptr);
2746 if (!ret)
2747 kfree((void *)(unsigned long)ptr);
2748 clear_extent_bits(&info->block_group_cache, start,
2749 end, (unsigned int)-1, GFP_NOFS);
2751 while(1) {
2752 ret = find_first_extent_bit(&info->free_space_cache, 0,
2753 &start, &end, EXTENT_DIRTY);
2754 if (ret)
2755 break;
2756 clear_extent_dirty(&info->free_space_cache, start,
2757 end, GFP_NOFS);
2759 mutex_unlock(&info->alloc_mutex);
2760 return 0;
2763 static unsigned long calc_ra(unsigned long start, unsigned long last,
2764 unsigned long nr)
2766 return min(last, start + nr - 1);
2769 static int noinline relocate_inode_pages(struct inode *inode, u64 start,
2770 u64 len)
2772 u64 page_start;
2773 u64 page_end;
2774 unsigned long last_index;
2775 unsigned long i;
2776 struct page *page;
2777 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2778 struct file_ra_state *ra;
2779 unsigned long total_read = 0;
2780 unsigned long ra_pages;
2781 struct btrfs_ordered_extent *ordered;
2782 struct btrfs_trans_handle *trans;
2784 ra = kzalloc(sizeof(*ra), GFP_NOFS);
2786 mutex_lock(&inode->i_mutex);
2787 i = start >> PAGE_CACHE_SHIFT;
2788 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
2790 ra_pages = BTRFS_I(inode)->root->fs_info->bdi.ra_pages;
2792 file_ra_state_init(ra, inode->i_mapping);
2794 for (; i <= last_index; i++) {
2795 if (total_read % ra_pages == 0) {
2796 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
2797 calc_ra(i, last_index, ra_pages));
2799 total_read++;
2800 again:
2801 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
2802 goto truncate_racing;
2803 page = grab_cache_page(inode->i_mapping, i);
2804 if (!page) {
2805 goto out_unlock;
2807 if (!PageUptodate(page)) {
2808 btrfs_readpage(NULL, page);
2809 lock_page(page);
2810 if (!PageUptodate(page)) {
2811 unlock_page(page);
2812 page_cache_release(page);
2813 goto out_unlock;
2816 wait_on_page_writeback(page);
2818 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2819 page_end = page_start + PAGE_CACHE_SIZE - 1;
2820 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2822 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2823 if (ordered) {
2824 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2825 unlock_page(page);
2826 page_cache_release(page);
2827 btrfs_start_ordered_extent(inode, ordered, 1);
2828 btrfs_put_ordered_extent(ordered);
2829 goto again;
2831 set_page_extent_mapped(page);
2834 set_extent_delalloc(io_tree, page_start,
2835 page_end, GFP_NOFS);
2836 set_page_dirty(page);
2838 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2839 unlock_page(page);
2840 page_cache_release(page);
2843 out_unlock:
2844 /* we have to start the IO in order to get the ordered extents
2845 * instantiated. This allows the relocation code to wait
2846 * for all the ordered extents to hit the disk.
2847 *
2848 * Otherwise, it would constantly loop over the same extents
2849 * because the old ones don't get deleted until the IO is
2850 * started
2851 */
2852 btrfs_fdatawrite_range(inode->i_mapping, start, start + len - 1,
2853 WB_SYNC_NONE);
2854 kfree(ra);
2855 trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
2856 if (trans) {
2857 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
2858 mark_inode_dirty(inode);
2860 mutex_unlock(&inode->i_mutex);
2861 return 0;
2863 truncate_racing:
2864 vmtruncate(inode, inode->i_size);
2865 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
2866 total_read);
2867 goto out_unlock;
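/*
 * The page loop above turns a byte range into inclusive page cache
 * indexes.  A sketch of that conversion, assuming the usual meaning of
 * PAGE_CACHE_SHIFT (log2 of the page size):
 */
static void byte_range_to_pages(u64 start, u64 len, unsigned int shift,
				unsigned long *first, unsigned long *last)
{
	*first = start >> shift;		/* page holding 'start' */
	*last = (start + len - 1) >> shift;	/* page holding the last byte */
}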
2870 /*
2871 * The back references tell us which tree holds a ref on a block,
2872 * but it is possible for the tree root field in the reference to
2873 * reflect the original root before a snapshot was made. In this
2874 * case we should search through all the children of a given root
2875 * to find potential holders of references on a block.
2876 *
2877 * Instead, we do something a little less fancy and just search
2878 * all the roots for a given key/block combination.
2879 */
2880 static int find_root_for_ref(struct btrfs_root *root,
2881 struct btrfs_path *path,
2882 struct btrfs_key *key0,
2883 int level,
2884 int file_key,
2885 struct btrfs_root **found_root,
2886 u64 bytenr)
2888 struct btrfs_key root_location;
2889 struct btrfs_root *cur_root = *found_root;
2890 struct btrfs_file_extent_item *file_extent;
2891 u64 root_search_start = BTRFS_FS_TREE_OBJECTID;
2892 u64 found_bytenr;
2893 int ret;
2895 root_location.offset = (u64)-1;
2896 root_location.type = BTRFS_ROOT_ITEM_KEY;
2897 path->lowest_level = level;
2898 path->reada = 0;
2899 while(1) {
2900 ret = btrfs_search_slot(NULL, cur_root, key0, path, 0, 0);
2901 found_bytenr = 0;
2902 if (ret == 0 && file_key) {
2903 struct extent_buffer *leaf = path->nodes[0];
2904 file_extent = btrfs_item_ptr(leaf, path->slots[0],
2905 struct btrfs_file_extent_item);
2906 if (btrfs_file_extent_type(leaf, file_extent) ==
2907 BTRFS_FILE_EXTENT_REG) {
2908 found_bytenr =
2909 btrfs_file_extent_disk_bytenr(leaf,
2910 file_extent);
2912 } else if (!file_key) {
2913 if (path->nodes[level])
2914 found_bytenr = path->nodes[level]->start;
2917 btrfs_release_path(cur_root, path);
2919 if (found_bytenr == bytenr) {
2920 *found_root = cur_root;
2921 ret = 0;
2922 goto out;
2924 ret = btrfs_search_root(root->fs_info->tree_root,
2925 root_search_start, &root_search_start);
2926 if (ret)
2927 break;
2929 root_location.objectid = root_search_start;
2930 cur_root = btrfs_read_fs_root_no_name(root->fs_info,
2931 &root_location);
2932 if (!cur_root) {
2933 ret = 1;
2934 break;
2937 out:
2938 path->lowest_level = 0;
2939 return ret;
2942 /*
2943 * note, this releases the path
2944 */
2945 static int noinline relocate_one_reference(struct btrfs_root *extent_root,
2946 struct btrfs_path *path,
2947 struct btrfs_key *extent_key,
2948 u64 *last_file_objectid,
2949 u64 *last_file_offset,
2950 u64 *last_file_root,
2951 u64 last_extent)
2953 struct inode *inode;
2954 struct btrfs_root *found_root;
2955 struct btrfs_key root_location;
2956 struct btrfs_key found_key;
2957 struct btrfs_extent_ref *ref;
2958 u64 ref_root;
2959 u64 ref_gen;
2960 u64 ref_objectid;
2961 u64 ref_offset;
2962 int ret;
2963 int level;
2965 WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
2967 ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
2968 struct btrfs_extent_ref);
2969 ref_root = btrfs_ref_root(path->nodes[0], ref);
2970 ref_gen = btrfs_ref_generation(path->nodes[0], ref);
2971 ref_objectid = btrfs_ref_objectid(path->nodes[0], ref);
2972 ref_offset = btrfs_ref_offset(path->nodes[0], ref);
2973 btrfs_release_path(extent_root, path);
2975 root_location.objectid = ref_root;
2976 if (ref_gen == 0)
2977 root_location.offset = 0;
2978 else
2979 root_location.offset = (u64)-1;
2980 root_location.type = BTRFS_ROOT_ITEM_KEY;
2982 found_root = btrfs_read_fs_root_no_name(extent_root->fs_info,
2983 &root_location);
2984 BUG_ON(!found_root);
2985 mutex_unlock(&extent_root->fs_info->alloc_mutex);
2987 if (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
2988 found_key.objectid = ref_objectid;
2989 found_key.type = BTRFS_EXTENT_DATA_KEY;
2990 found_key.offset = ref_offset;
2991 level = 0;
2993 if (last_extent == extent_key->objectid &&
2994 *last_file_objectid == ref_objectid &&
2995 *last_file_offset == ref_offset &&
2996 *last_file_root == ref_root)
2997 goto out;
2999 ret = find_root_for_ref(extent_root, path, &found_key,
3000 level, 1, &found_root,
3001 extent_key->objectid);
3003 if (ret)
3004 goto out;
3006 if (last_extent == extent_key->objectid &&
3007 *last_file_objectid == ref_objectid &&
3008 *last_file_offset == ref_offset &&
3009 *last_file_root == ref_root)
3010 goto out;
3012 inode = btrfs_iget_locked(extent_root->fs_info->sb,
3013 ref_objectid, found_root);
3014 if (inode->i_state & I_NEW) {
3015 /* the inode and parent dir are two different roots */
3016 BTRFS_I(inode)->root = found_root;
3017 BTRFS_I(inode)->location.objectid = ref_objectid;
3018 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
3019 BTRFS_I(inode)->location.offset = 0;
3020 btrfs_read_locked_inode(inode);
3021 unlock_new_inode(inode);
3024 /* this can happen if the reference is not against
3025 * the latest version of the tree root
3026 */
3027 if (is_bad_inode(inode))
3028 goto out;
3030 *last_file_objectid = inode->i_ino;
3031 *last_file_root = found_root->root_key.objectid;
3032 *last_file_offset = ref_offset;
3034 relocate_inode_pages(inode, ref_offset, extent_key->offset);
3035 iput(inode);
3036 } else {
3037 struct btrfs_trans_handle *trans;
3038 struct extent_buffer *eb;
3039 int needs_lock = 0;
3041 eb = read_tree_block(found_root, extent_key->objectid,
3042 extent_key->offset, 0);
3043 btrfs_tree_lock(eb);
3044 level = btrfs_header_level(eb);
3046 if (level == 0)
3047 btrfs_item_key_to_cpu(eb, &found_key, 0);
3048 else
3049 btrfs_node_key_to_cpu(eb, &found_key, 0);
3051 btrfs_tree_unlock(eb);
3052 free_extent_buffer(eb);
3054 ret = find_root_for_ref(extent_root, path, &found_key,
3055 level, 0, &found_root,
3056 extent_key->objectid);
3058 if (ret)
3059 goto out;
3061 /*
3062 * right here almost anything could happen to our key,
3063 * but that's ok. The cow below will either relocate it
3064 * or someone else will have relocated it. Either way,
3065 * it is in a different spot than it was before and
3066 * we're happy.
3067 */
3069 trans = btrfs_start_transaction(found_root, 1);
3071 if (found_root == extent_root->fs_info->extent_root ||
3072 found_root == extent_root->fs_info->chunk_root ||
3073 found_root == extent_root->fs_info->dev_root) {
3074 needs_lock = 1;
3075 mutex_lock(&extent_root->fs_info->alloc_mutex);
3078 path->lowest_level = level;
3079 path->reada = 2;
3080 ret = btrfs_search_slot(trans, found_root, &found_key, path,
3081 0, 1);
3082 path->lowest_level = 0;
3083 btrfs_release_path(found_root, path);
3085 if (found_root == found_root->fs_info->extent_root)
3086 btrfs_extent_post_op(trans, found_root);
3087 if (needs_lock)
3088 mutex_unlock(&extent_root->fs_info->alloc_mutex);
3090 btrfs_end_transaction(trans, found_root);
3093 out:
3094 mutex_lock(&extent_root->fs_info->alloc_mutex);
3095 return 0;
3098 static int noinline del_extent_zero(struct btrfs_root *extent_root,
3099 struct btrfs_path *path,
3100 struct btrfs_key *extent_key)
3102 int ret;
3103 struct btrfs_trans_handle *trans;
3105 trans = btrfs_start_transaction(extent_root, 1);
3106 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
3107 if (ret > 0) {
3108 ret = -EIO;
3109 goto out;
3111 if (ret < 0)
3112 goto out;
3113 ret = btrfs_del_item(trans, extent_root, path);
3114 out:
3115 btrfs_end_transaction(trans, extent_root);
3116 return ret;
3119 static int noinline relocate_one_extent(struct btrfs_root *extent_root,
3120 struct btrfs_path *path,
3121 struct btrfs_key *extent_key)
3123 struct btrfs_key key;
3124 struct btrfs_key found_key;
3125 struct extent_buffer *leaf;
3126 u64 last_file_objectid = 0;
3127 u64 last_file_root = 0;
3128 u64 last_file_offset = (u64)-1;
3129 u64 last_extent = 0;
3130 u32 nritems;
3131 u32 item_size;
3132 int ret = 0;
3134 if (extent_key->objectid == 0) {
3135 ret = del_extent_zero(extent_root, path, extent_key);
3136 goto out;
3138 key.objectid = extent_key->objectid;
3139 key.type = BTRFS_EXTENT_REF_KEY;
3140 key.offset = 0;
3142 while(1) {
3143 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3145 if (ret < 0)
3146 goto out;
3148 ret = 0;
3149 leaf = path->nodes[0];
3150 nritems = btrfs_header_nritems(leaf);
3151 if (path->slots[0] == nritems) {
3152 ret = btrfs_next_leaf(extent_root, path);
3153 if (ret > 0) {
3154 ret = 0;
3155 goto out;
3157 if (ret < 0)
3158 goto out;
3159 leaf = path->nodes[0];
3162 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3163 if (found_key.objectid != extent_key->objectid) {
3164 break;
3167 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
3168 break;
3171 key.offset = found_key.offset + 1;
3172 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3174 ret = relocate_one_reference(extent_root, path, extent_key,
3175 &last_file_objectid,
3176 &last_file_offset,
3177 &last_file_root, last_extent);
3178 if (ret)
3179 goto out;
3180 last_extent = extent_key->objectid;
3182 ret = 0;
3183 out:
3184 btrfs_release_path(extent_root, path);
3185 return ret;
3188 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
3190 u64 num_devices;
3191 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
3192 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
3194 num_devices = root->fs_info->fs_devices->num_devices;
3195 if (num_devices == 1) {
3196 stripped |= BTRFS_BLOCK_GROUP_DUP;
3197 stripped = flags & ~stripped;
3199 /* turn raid0 into single device chunks */
3200 if (flags & BTRFS_BLOCK_GROUP_RAID0)
3201 return stripped;
3203 /* turn mirroring into duplication */
3204 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3205 BTRFS_BLOCK_GROUP_RAID10))
3206 return stripped | BTRFS_BLOCK_GROUP_DUP;
3207 return flags;
3208 } else {
3209 /* they already had raid on here, just return */
3210 if (flags & stripped)
3211 return flags;
3213 stripped |= BTRFS_BLOCK_GROUP_DUP;
3214 stripped = flags & ~stripped;
3216 /* switch duplicated blocks with raid1 */
3217 if (flags & BTRFS_BLOCK_GROUP_DUP)
3218 return stripped | BTRFS_BLOCK_GROUP_RAID1;
3220 /* turn single device chunks into raid0 */
3221 return stripped | BTRFS_BLOCK_GROUP_RAID0;
3223 return flags;
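/*
 * A worked example of the profile rewrite above: shrinking a
 * two-device RAID1 filesystem down to one device leaves mirroring with
 * nowhere to go, so the flags degrade to DUP.  This caller is purely
 * illustrative.
 */
static void shrink_profile_example(struct btrfs_root *root)
{
	u64 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID1;
	u64 new_flags = update_block_group_flags(root, flags);

	/* with num_devices == 1, new_flags is METADATA | DUP */
	(void)new_flags;
}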
3226 int __alloc_chunk_for_shrink(struct btrfs_root *root,
3227 struct btrfs_block_group_cache *shrink_block_group,
3228 int force)
3230 struct btrfs_trans_handle *trans;
3231 u64 new_alloc_flags;
3232 u64 calc;
3234 spin_lock(&shrink_block_group->lock);
3235 if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
3236 spin_unlock(&shrink_block_group->lock);
3237 mutex_unlock(&root->fs_info->alloc_mutex);
3239 trans = btrfs_start_transaction(root, 1);
3240 mutex_lock(&root->fs_info->alloc_mutex);
3241 spin_lock(&shrink_block_group->lock);
3243 new_alloc_flags = update_block_group_flags(root,
3244 shrink_block_group->flags);
3245 if (new_alloc_flags != shrink_block_group->flags) {
3246 calc =
3247 btrfs_block_group_used(&shrink_block_group->item);
3248 } else {
3249 calc = shrink_block_group->key.offset;
3251 spin_unlock(&shrink_block_group->lock);
3253 do_chunk_alloc(trans, root->fs_info->extent_root,
3254 calc + 2 * 1024 * 1024, new_alloc_flags, force);
3256 mutex_unlock(&root->fs_info->alloc_mutex);
3257 btrfs_end_transaction(trans, root);
3258 mutex_lock(&root->fs_info->alloc_mutex);
3259 } else
3260 spin_unlock(&shrink_block_group->lock);
3261 return 0;
3264 int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 shrink_start)
3266 struct btrfs_trans_handle *trans;
3267 struct btrfs_root *tree_root = root->fs_info->tree_root;
3268 struct btrfs_path *path;
3269 u64 cur_byte;
3270 u64 total_found;
3271 u64 shrink_last_byte;
3272 struct btrfs_block_group_cache *shrink_block_group;
3273 struct btrfs_fs_info *info = root->fs_info;
3274 struct btrfs_key key;
3275 struct btrfs_key found_key;
3276 struct extent_buffer *leaf;
3277 u32 nritems;
3278 int ret;
3279 int progress;
3281 mutex_lock(&root->fs_info->alloc_mutex);
3282 shrink_block_group = btrfs_lookup_block_group(root->fs_info,
3283 shrink_start);
3284 BUG_ON(!shrink_block_group);
3286 shrink_last_byte = shrink_block_group->key.objectid +
3287 shrink_block_group->key.offset;
3289 shrink_block_group->space_info->total_bytes -=
3290 shrink_block_group->key.offset;
3291 path = btrfs_alloc_path();
3292 root = root->fs_info->extent_root;
3293 path->reada = 2;
3295 printk("btrfs relocating block group %llu flags %llu\n",
3296 (unsigned long long)shrink_start,
3297 (unsigned long long)shrink_block_group->flags);
3299 __alloc_chunk_for_shrink(root, shrink_block_group, 1);
3301 again:
3303 shrink_block_group->ro = 1;
3305 total_found = 0;
3306 progress = 0;
3307 key.objectid = shrink_start;
3308 key.offset = 0;
3309 key.type = 0;
3310 cur_byte = key.objectid;
3312 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3313 if (ret < 0)
3314 goto out;
3316 ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
3317 if (ret < 0)
3318 goto out;
3320 if (ret == 0) {
3321 leaf = path->nodes[0];
3322 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3323 if (found_key.objectid + found_key.offset > shrink_start &&
3324 found_key.objectid < shrink_last_byte) {
3325 cur_byte = found_key.objectid;
3326 key.objectid = cur_byte;
3329 btrfs_release_path(root, path);
3331 while(1) {
3332 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3333 if (ret < 0)
3334 goto out;
3336 next:
3337 leaf = path->nodes[0];
3338 nritems = btrfs_header_nritems(leaf);
3339 if (path->slots[0] >= nritems) {
3340 ret = btrfs_next_leaf(root, path);
3341 if (ret < 0)
3342 goto out;
3343 if (ret == 1) {
3344 ret = 0;
3345 break;
3347 leaf = path->nodes[0];
3348 nritems = btrfs_header_nritems(leaf);
3351 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3353 if (found_key.objectid >= shrink_last_byte)
3354 break;
3356 if (progress && need_resched()) {
3357 memcpy(&key, &found_key, sizeof(key));
3358 cond_resched();
3359 btrfs_release_path(root, path);
3360 btrfs_search_slot(NULL, root, &key, path, 0, 0);
3361 progress = 0;
3362 goto next;
3364 progress = 1;
3366 if (btrfs_key_type(&found_key) != BTRFS_EXTENT_ITEM_KEY ||
3367 found_key.objectid + found_key.offset <= cur_byte) {
3368 memcpy(&key, &found_key, sizeof(key));
3369 key.offset++;
3370 path->slots[0]++;
3371 goto next;
3374 total_found++;
3375 cur_byte = found_key.objectid + found_key.offset;
3376 key.objectid = cur_byte;
3377 btrfs_release_path(root, path);
3378 ret = relocate_one_extent(root, path, &found_key);
3379 __alloc_chunk_for_shrink(root, shrink_block_group, 0);
3382 btrfs_release_path(root, path);
3384 if (total_found > 0) {
3385 printk("btrfs relocate found %llu last extent was %llu\n",
3386 (unsigned long long)total_found,
3387 (unsigned long long)found_key.objectid);
3388 mutex_unlock(&root->fs_info->alloc_mutex);
3389 trans = btrfs_start_transaction(tree_root, 1);
3390 btrfs_commit_transaction(trans, tree_root);
3392 btrfs_clean_old_snapshots(tree_root);
3394 btrfs_wait_ordered_extents(tree_root);
3396 trans = btrfs_start_transaction(tree_root, 1);
3397 btrfs_commit_transaction(trans, tree_root);
3398 mutex_lock(&root->fs_info->alloc_mutex);
3399 goto again;
3402 /*
3403 * we've freed all the extents, now remove the block
3404 * group item from the tree
3405 */
3406 mutex_unlock(&root->fs_info->alloc_mutex);
3408 trans = btrfs_start_transaction(root, 1);
3410 mutex_lock(&root->fs_info->alloc_mutex);
3411 memcpy(&key, &shrink_block_group->key, sizeof(key));
3413 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3414 if (ret > 0)
3415 ret = -EIO;
3416 if (ret < 0) {
3417 btrfs_end_transaction(trans, root);
3418 goto out;
3421 clear_extent_bits(&info->block_group_cache, key.objectid,
3422 key.objectid + key.offset - 1,
3423 (unsigned int)-1, GFP_NOFS);
3426 clear_extent_bits(&info->free_space_cache,
3427 key.objectid, key.objectid + key.offset - 1,
3428 (unsigned int)-1, GFP_NOFS);
3430 memset(shrink_block_group, 0, sizeof(*shrink_block_group));
3431 kfree(shrink_block_group);
3433 btrfs_del_item(trans, root, path);
3434 btrfs_release_path(root, path);
3435 mutex_unlock(&root->fs_info->alloc_mutex);
3436 btrfs_commit_transaction(trans, root);
3438 mutex_lock(&root->fs_info->alloc_mutex);
3440 /* the code to unpin extents might set a few bits in the free
3441 * space cache for this range again
3442 */
3443 clear_extent_bits(&info->free_space_cache,
3444 key.objectid, key.objectid + key.offset - 1,
3445 (unsigned int)-1, GFP_NOFS);
3446 out:
3447 btrfs_free_path(path);
3448 mutex_unlock(&root->fs_info->alloc_mutex);
3449 return ret;
3452 int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
3453 struct btrfs_key *key)
3455 int ret = 0;
3456 struct btrfs_key found_key;
3457 struct extent_buffer *leaf;
3458 int slot;
3460 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
3461 if (ret < 0)
3462 goto out;
3464 while(1) {
3465 slot = path->slots[0];
3466 leaf = path->nodes[0];
3467 if (slot >= btrfs_header_nritems(leaf)) {
3468 ret = btrfs_next_leaf(root, path);
3469 if (ret == 0)
3470 continue;
3471 if (ret < 0)
3472 goto out;
3473 break;
3475 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3477 if (found_key.objectid >= key->objectid &&
3478 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
3479 ret = 0;
3480 goto out;
3482 path->slots[0]++;
3484 ret = -ENOENT;
3485 out:
3486 return ret;
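/*
 * A sketch of how a caller drives find_first_block_group(): search,
 * handle the item, then bump the key just past the group found so the
 * next search lands on the following one.  btrfs_read_block_groups()
 * below does exactly this; the processing is stubbed out here.
 */
static void for_each_block_group_sketch(struct btrfs_root *root,
					struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);

	while (find_first_block_group(root, path, &key) == 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		/* ... process one block group item ... */
		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(root, path);
	}
}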
3489 int btrfs_read_block_groups(struct btrfs_root *root)
3491 struct btrfs_path *path;
3492 int ret;
3493 int bit;
3494 struct btrfs_block_group_cache *cache;
3495 struct btrfs_fs_info *info = root->fs_info;
3496 struct btrfs_space_info *space_info;
3497 struct extent_io_tree *block_group_cache;
3498 struct btrfs_key key;
3499 struct btrfs_key found_key;
3500 struct extent_buffer *leaf;
3502 block_group_cache = &info->block_group_cache;
3503 root = info->extent_root;
3504 key.objectid = 0;
3505 key.offset = 0;
3506 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3507 path = btrfs_alloc_path();
3508 if (!path)
3509 return -ENOMEM;
3511 mutex_lock(&root->fs_info->alloc_mutex);
3512 while(1) {
3513 ret = find_first_block_group(root, path, &key);
3514 if (ret > 0) {
3515 ret = 0;
3516 goto error;
3518 if (ret != 0)
3519 goto error;
3521 leaf = path->nodes[0];
3522 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3523 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3524 if (!cache) {
3525 ret = -ENOMEM;
3526 break;
3529 spin_lock_init(&cache->lock);
3530 read_extent_buffer(leaf, &cache->item,
3531 btrfs_item_ptr_offset(leaf, path->slots[0]),
3532 sizeof(cache->item));
3533 memcpy(&cache->key, &found_key, sizeof(found_key));
3535 key.objectid = found_key.objectid + found_key.offset;
3536 btrfs_release_path(root, path);
3537 cache->flags = btrfs_block_group_flags(&cache->item);
3538 bit = 0;
3539 if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
3540 bit = BLOCK_GROUP_DATA;
3541 } else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
3542 bit = BLOCK_GROUP_SYSTEM;
3543 } else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
3544 bit = BLOCK_GROUP_METADATA;
3546 set_avail_alloc_bits(info, cache->flags);
3548 ret = update_space_info(info, cache->flags, found_key.offset,
3549 btrfs_block_group_used(&cache->item),
3550 &space_info);
3551 BUG_ON(ret);
3552 cache->space_info = space_info;
3554 /* use EXTENT_LOCKED to prevent merging */
3555 set_extent_bits(block_group_cache, found_key.objectid,
3556 found_key.objectid + found_key.offset - 1,
3557 EXTENT_LOCKED, GFP_NOFS);
3558 set_state_private(block_group_cache, found_key.objectid,
3559 (unsigned long)cache);
3560 set_extent_bits(block_group_cache, found_key.objectid,
3561 found_key.objectid + found_key.offset - 1,
3562 bit | EXTENT_LOCKED, GFP_NOFS);
3563 if (key.objectid >=
3564 btrfs_super_total_bytes(&info->super_copy))
3565 break;
3567 ret = 0;
3568 error:
3569 btrfs_free_path(path);
3570 mutex_unlock(&root->fs_info->alloc_mutex);
3571 return ret;
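/*
 * The flag-to-cache-bit mapping open-coded above boils down to the
 * helper below; block_group_state_bits(), used by
 * btrfs_make_block_group() next, presumably encodes the same table.
 */
static int flags_to_block_group_bit(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		return BLOCK_GROUP_DATA;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		return BLOCK_GROUP_SYSTEM;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		return BLOCK_GROUP_METADATA;
	return 0;
}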
3574 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
3575 struct btrfs_root *root, u64 bytes_used,
3576 u64 type, u64 chunk_objectid, u64 chunk_offset,
3577 u64 size)
3579 int ret;
3580 int bit = 0;
3581 struct btrfs_root *extent_root;
3582 struct btrfs_block_group_cache *cache;
3583 struct extent_io_tree *block_group_cache;
3585 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
3586 extent_root = root->fs_info->extent_root;
3587 block_group_cache = &root->fs_info->block_group_cache;
3589 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3590 BUG_ON(!cache);
3591 cache->key.objectid = chunk_offset;
3592 cache->key.offset = size;
3593 spin_lock_init(&cache->lock);
3594 btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3596 btrfs_set_block_group_used(&cache->item, bytes_used);
3597 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
3598 cache->flags = type;
3599 btrfs_set_block_group_flags(&cache->item, type);
3601 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
3602 &cache->space_info);
3603 BUG_ON(ret);
3605 bit = block_group_state_bits(type);
3606 set_extent_bits(block_group_cache, chunk_offset,
3607 chunk_offset + size - 1,
3608 EXTENT_LOCKED, GFP_NOFS);
3609 set_state_private(block_group_cache, chunk_offset,
3610 (unsigned long)cache);
3611 set_extent_bits(block_group_cache, chunk_offset,
3612 chunk_offset + size - 1,
3613 bit | EXTENT_LOCKED, GFP_NOFS);
3615 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
3616 sizeof(cache->item));
3617 BUG_ON(ret);
3619 finish_current_insert(trans, extent_root);
3620 ret = del_pending_extents(trans, extent_root);
3621 BUG_ON(ret);
3622 set_avail_alloc_bits(extent_root->fs_info, type);
3624 return 0;