Btrfs: make sure all pending extent operations are complete
fs/btrfs/extent-tree.c (blob 376656f65b337f0b028bd8910feec3a9dd2bc1be)
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include "compat.h"
24 #include "hash.h"
25 #include "crc32c.h"
26 #include "ctree.h"
27 #include "disk-io.h"
28 #include "print-tree.h"
29 #include "transaction.h"
30 #include "volumes.h"
31 #include "locking.h"
32 #include "ref-cache.h"
34 #define PENDING_EXTENT_INSERT 0
35 #define PENDING_EXTENT_DELETE 1
36 #define PENDING_BACKREF_UPDATE 2
38 struct pending_extent_op {
39 int type;
40 u64 bytenr;
41 u64 num_bytes;
42 u64 parent;
43 u64 orig_parent;
44 u64 generation;
45 u64 orig_generation;
46 int level;
47 struct list_head list;
48 int del;
51 static int finish_current_insert(struct btrfs_trans_handle *trans,
52 struct btrfs_root *extent_root, int all);
53 static int del_pending_extents(struct btrfs_trans_handle *trans,
54 struct btrfs_root *extent_root, int all);
55 static int pin_down_bytes(struct btrfs_trans_handle *trans,
56 struct btrfs_root *root,
57 u64 bytenr, u64 num_bytes, int is_data);
58 static int update_block_group(struct btrfs_trans_handle *trans,
59 struct btrfs_root *root,
60 u64 bytenr, u64 num_bytes, int alloc,
61 int mark_free);
63 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
65 return (cache->flags & bits) == bits;
69 * this adds the block group to the fs_info rb tree for the block group
70 * cache
72 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
73 struct btrfs_block_group_cache *block_group)
75 struct rb_node **p;
76 struct rb_node *parent = NULL;
77 struct btrfs_block_group_cache *cache;
79 spin_lock(&info->block_group_cache_lock);
80 p = &info->block_group_cache_tree.rb_node;
82 while (*p) {
83 parent = *p;
84 cache = rb_entry(parent, struct btrfs_block_group_cache,
85 cache_node);
86 if (block_group->key.objectid < cache->key.objectid) {
87 p = &(*p)->rb_left;
88 } else if (block_group->key.objectid > cache->key.objectid) {
89 p = &(*p)->rb_right;
90 } else {
91 spin_unlock(&info->block_group_cache_lock);
92 return -EEXIST;
96 rb_link_node(&block_group->cache_node, parent, p);
97 rb_insert_color(&block_group->cache_node,
98 &info->block_group_cache_tree);
99 spin_unlock(&info->block_group_cache_lock);
101 return 0;
105 * This will return the block group at or after bytenr if contains is 0, else
106 * it will return the block group that contains the bytenr
108 static struct btrfs_block_group_cache *
109 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
110 int contains)
112 struct btrfs_block_group_cache *cache, *ret = NULL;
113 struct rb_node *n;
114 u64 end, start;
116 spin_lock(&info->block_group_cache_lock);
117 n = info->block_group_cache_tree.rb_node;
119 while (n) {
120 cache = rb_entry(n, struct btrfs_block_group_cache,
121 cache_node);
122 end = cache->key.objectid + cache->key.offset - 1;
123 start = cache->key.objectid;
125 if (bytenr < start) {
126 if (!contains && (!ret || start < ret->key.objectid))
127 ret = cache;
128 n = n->rb_left;
129 } else if (bytenr > start) {
130 if (contains && bytenr <= end) {
131 ret = cache;
132 break;
134 n = n->rb_right;
135 } else {
136 ret = cache;
137 break;
140 if (ret)
141 atomic_inc(&ret->count);
142 spin_unlock(&info->block_group_cache_lock);
144 return ret;
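/*
 * A minimal usage sketch of the two lookup modes above (hypothetical
 * caller, error handling elided; put_block_group() is defined further
 * down in this file).  With contains set, the search succeeds only if
 * bytenr falls inside a group; with contains clear, it falls forward to
 * the first group starting at or after bytenr.  Either way the caller
 * is handed a reference that must be dropped when done.
 */
static void example_block_group_lookups(struct btrfs_fs_info *info,
					u64 bytenr)
{
	struct btrfs_block_group_cache *bg;

	/* the block group containing bytenr, or NULL */
	bg = block_group_cache_tree_search(info, bytenr, 1);
	if (bg)
		put_block_group(bg);

	/* the first block group starting at or after bytenr, or NULL */
	bg = block_group_cache_tree_search(info, bytenr, 0);
	if (bg)
		put_block_group(bg);
}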
148 * this is only called by cache_block_group. Since we could have freed extents,
149 * we need to check the pinned_extents for any extents that can't be used yet
150 * since their free space will be released as soon as the transaction commits.
152 static int add_new_free_space(struct btrfs_block_group_cache *block_group,
153 struct btrfs_fs_info *info, u64 start, u64 end)
155 u64 extent_start, extent_end, size;
156 int ret;
158 mutex_lock(&info->pinned_mutex);
159 while (start < end) {
160 ret = find_first_extent_bit(&info->pinned_extents, start,
161 &extent_start, &extent_end,
162 EXTENT_DIRTY);
163 if (ret)
164 break;
166 if (extent_start == start) {
167 start = extent_end + 1;
168 } else if (extent_start > start && extent_start < end) {
169 size = extent_start - start;
170 ret = btrfs_add_free_space(block_group, start,
171 size);
172 BUG_ON(ret);
173 start = extent_end + 1;
174 } else {
175 break;
179 if (start < end) {
180 size = end - start;
181 ret = btrfs_add_free_space(block_group, start, size);
182 BUG_ON(ret);
184 mutex_unlock(&info->pinned_mutex);
186 return 0;
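/*
 * Worked example of the loop above (byte ranges are illustrative):
 * caching a block group spanning [0, 100) while [30, 40] and [70, 80]
 * are pinned EXTENT_DIRTY adds free space [0, 30), [41, 70) and, via
 * the tail check after the loop, [81, 100).  The pinned ranges are
 * skipped and only become free space once the transaction commits and
 * they are unpinned.
 */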
189 static int remove_sb_from_cache(struct btrfs_root *root,
190 struct btrfs_block_group_cache *cache)
192 u64 bytenr;
193 u64 *logical;
194 int stripe_len;
195 int i, nr, ret;
197 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
198 bytenr = btrfs_sb_offset(i);
199 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
200 cache->key.objectid, bytenr, 0,
201 &logical, &nr, &stripe_len);
202 BUG_ON(ret);
203 while (nr--) {
204 btrfs_remove_free_space(cache, logical[nr],
205 stripe_len);
207 kfree(logical);
209 return 0;
212 static int cache_block_group(struct btrfs_root *root,
213 struct btrfs_block_group_cache *block_group)
215 struct btrfs_path *path;
216 int ret = 0;
217 struct btrfs_key key;
218 struct extent_buffer *leaf;
219 int slot;
220 u64 last;
222 if (!block_group)
223 return 0;
225 root = root->fs_info->extent_root;
227 if (block_group->cached)
228 return 0;
230 path = btrfs_alloc_path();
231 if (!path)
232 return -ENOMEM;
234 path->reada = 2;
236 * we get into deadlocks with paths held by callers of this function.
237 * since the alloc_mutex is protecting things right now, just
238 * skip the locking here
240 path->skip_locking = 1;
241 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
242 key.objectid = last;
243 key.offset = 0;
244 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
245 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
246 if (ret < 0)
247 goto err;
249 while (1) {
250 leaf = path->nodes[0];
251 slot = path->slots[0];
252 if (slot >= btrfs_header_nritems(leaf)) {
253 ret = btrfs_next_leaf(root, path);
254 if (ret < 0)
255 goto err;
256 if (ret == 0)
257 continue;
258 else
259 break;
261 btrfs_item_key_to_cpu(leaf, &key, slot);
262 if (key.objectid < block_group->key.objectid)
263 goto next;
265 if (key.objectid >= block_group->key.objectid +
266 block_group->key.offset)
267 break;
269 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
270 add_new_free_space(block_group, root->fs_info, last,
271 key.objectid);
273 last = key.objectid + key.offset;
275 next:
276 path->slots[0]++;
279 add_new_free_space(block_group, root->fs_info, last,
280 block_group->key.objectid +
281 block_group->key.offset);
283 remove_sb_from_cache(root, block_group);
284 block_group->cached = 1;
285 ret = 0;
286 err:
287 btrfs_free_path(path);
288 return ret;
292 * return the block group that starts at or after bytenr
294 static struct btrfs_block_group_cache *
295 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
297 struct btrfs_block_group_cache *cache;
299 cache = block_group_cache_tree_search(info, bytenr, 0);
301 return cache;
305 * return the block group that contains the given bytenr
307 struct btrfs_block_group_cache *btrfs_lookup_block_group(
308 struct btrfs_fs_info *info,
309 u64 bytenr)
311 struct btrfs_block_group_cache *cache;
313 cache = block_group_cache_tree_search(info, bytenr, 1);
315 return cache;
318 static inline void put_block_group(struct btrfs_block_group_cache *cache)
320 if (atomic_dec_and_test(&cache->count))
321 kfree(cache);
324 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
325 u64 flags)
327 struct list_head *head = &info->space_info;
328 struct btrfs_space_info *found;
329 list_for_each_entry(found, head, list) {
330 if (found->flags == flags)
331 return found;
333 return NULL;
336 static u64 div_factor(u64 num, int factor)
338 if (factor == 10)
339 return num;
340 num *= factor;
341 do_div(num, 10);
342 return num;
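/*
 * div_factor() scales num by factor tenths using 64-bit math, e.g.:
 *
 *	u64 cap = 1073741824ULL;		a 1 GiB block group
 *	u64 thresh = div_factor(cap, 9);	966367641, i.e. 90%
 *
 * btrfs_find_block_group() below starts with factor 9, so a group is a
 * candidate only while used + pinned + reserved stays under 90% of its
 * size, and relaxes to factor 10 (the whole group) on a full retry.
 */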
345 u64 btrfs_find_block_group(struct btrfs_root *root,
346 u64 search_start, u64 search_hint, int owner)
348 struct btrfs_block_group_cache *cache;
349 u64 used;
350 u64 last = max(search_hint, search_start);
351 u64 group_start = 0;
352 int full_search = 0;
353 int factor = 9;
354 int wrapped = 0;
355 again:
356 while (1) {
357 cache = btrfs_lookup_first_block_group(root->fs_info, last);
358 if (!cache)
359 break;
361 spin_lock(&cache->lock);
362 last = cache->key.objectid + cache->key.offset;
363 used = btrfs_block_group_used(&cache->item);
365 if ((full_search || !cache->ro) &&
366 block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
367 if (used + cache->pinned + cache->reserved <
368 div_factor(cache->key.offset, factor)) {
369 group_start = cache->key.objectid;
370 spin_unlock(&cache->lock);
371 put_block_group(cache);
372 goto found;
375 spin_unlock(&cache->lock);
376 put_block_group(cache);
377 cond_resched();
379 if (!wrapped) {
380 last = search_start;
381 wrapped = 1;
382 goto again;
384 if (!full_search && factor < 10) {
385 last = search_start;
386 full_search = 1;
387 factor = 10;
388 goto again;
390 found:
391 return group_start;
394 /* simple helper to search for an existing extent at a given offset */
395 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
397 int ret;
398 struct btrfs_key key;
399 struct btrfs_path *path;
401 path = btrfs_alloc_path();
402 BUG_ON(!path);
403 key.objectid = start;
404 key.offset = len;
405 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
406 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
407 0, 0);
408 btrfs_free_path(path);
409 return ret;
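/*
 * Like btrfs_search_slot(), this returns 0 when an item exactly
 * matching (start, BTRFS_EXTENT_ITEM_KEY, len) exists, a positive
 * value when it does not, and a negative errno on error, so callers
 * typically just test for ret == 0.
 */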
413 * Back reference rules. Back refs have three main goals:
415 * 1) differentiate between all holders of references to an extent so that
416 * when a reference is dropped we can make sure it was a valid reference
417 * before freeing the extent.
419 * 2) Provide enough information to quickly find the holders of an extent
420 * if we notice a given block is corrupted or bad.
422 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
423 * maintenance. This is actually the same as #2, but with a slightly
424 * different use case.
426 * File extents can be referenced by:
428 * - multiple snapshots, subvolumes, or different generations in one subvol
429 * - different files inside a single subvolume
430 * - different offsets inside a file (bookend extents in file.c)
432 * The extent ref structure has fields for:
434 * - Objectid of the subvolume root
435 * - Generation number of the tree holding the reference
436 * - objectid of the file holding the reference
437 * number of references held by the parent node (always 1 for tree blocks)
439 * A btree leaf may hold multiple references to a file extent. In most cases,
440 * these references are from the same file and the corresponding offsets inside
441 * the file are close together.
443 * When a file extent is allocated the fields are filled in:
444 * (root_key.objectid, trans->transid, inode objectid, 1)
446 * When a leaf is cow'd new references are added for every file extent found
447 * in the leaf. It looks similar to the create case, but trans->transid will
448 * be different when the block is cow'd.
450 * (root_key.objectid, trans->transid, inode objectid,
451 * number of references in the leaf)
453 * When a file extent is removed either during snapshot deletion or
454 * file truncation, we find the corresponding back reference and check
455 * the following fields:
457 * (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
458 * inode objectid)
460 * Btree extents can be referenced by:
462 * - Different subvolumes
463 * - Different generations of the same subvolume
465 * When a tree block is created, back references are inserted:
467 * (root->root_key.objectid, trans->transid, level, 1)
469 * When a tree block is cow'd, new back references are added for all the
470 * blocks it points to. If the tree block isn't in a reference counted root,
471 * the old back references are removed. These new back references are of
472 * the form (trans->transid will have increased since creation):
474 * (root->root_key.objectid, trans->transid, level, 1)
476 * When a backref is being deleted, the following fields are checked:
478 * if backref was for a tree root:
479 * (btrfs_header_owner(itself), btrfs_header_generation(itself), level)
480 * else
481 * (btrfs_header_owner(parent), btrfs_header_generation(parent), level)
483 * Back reference key composition:
485 * The key objectid corresponds to the first byte in the extent, the key
486 * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first
487 * byte of the parent extent. If an extent is a tree root, the key offset is set
488 * to the key objectid.
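/*
 * A concrete composition sketch (all values hypothetical): a 4K file
 * extent at byte 12582912, referenced from a leaf whose first byte is
 * 29360128, is described by an extent item plus a backref whose key
 * offset is the parent leaf's bytenr:
 */
static void example_backref_keys(void)
{
	/* the extent item itself */
	struct btrfs_key extent_key = {
		.objectid = 12582912,		/* first byte of the extent */
		.type = BTRFS_EXTENT_ITEM_KEY,
		.offset = 4096,			/* extent size in bytes */
	};
	/* its backref; the key offset is the parent leaf's first byte */
	struct btrfs_key ref_key = {
		.objectid = 12582912,
		.type = BTRFS_EXTENT_REF_KEY,
		.offset = 29360128,
	};
	/* for a tree root, ref_key.offset would equal ref_key.objectid */
	(void)extent_key;
	(void)ref_key;
}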
491 static noinline int lookup_extent_backref(struct btrfs_trans_handle *trans,
492 struct btrfs_root *root,
493 struct btrfs_path *path,
494 u64 bytenr, u64 parent,
495 u64 ref_root, u64 ref_generation,
496 u64 owner_objectid, int del)
498 struct btrfs_key key;
499 struct btrfs_extent_ref *ref;
500 struct extent_buffer *leaf;
501 u64 ref_objectid;
502 int ret;
504 key.objectid = bytenr;
505 key.type = BTRFS_EXTENT_REF_KEY;
506 key.offset = parent;
508 ret = btrfs_search_slot(trans, root, &key, path, del ? -1 : 0, 1);
509 if (ret < 0)
510 goto out;
511 if (ret > 0) {
512 ret = -ENOENT;
513 goto out;
516 leaf = path->nodes[0];
517 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
518 ref_objectid = btrfs_ref_objectid(leaf, ref);
519 if (btrfs_ref_root(leaf, ref) != ref_root ||
520 btrfs_ref_generation(leaf, ref) != ref_generation ||
521 (ref_objectid != owner_objectid &&
522 ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
523 ret = -EIO;
524 WARN_ON(1);
525 goto out;
527 ret = 0;
528 out:
529 return ret;
533 * updates all the backrefs that are pending on update_list for the
534 * extent_root
536 static noinline int update_backrefs(struct btrfs_trans_handle *trans,
537 struct btrfs_root *extent_root,
538 struct btrfs_path *path,
539 struct list_head *update_list)
541 struct btrfs_key key;
542 struct btrfs_extent_ref *ref;
543 struct btrfs_fs_info *info = extent_root->fs_info;
544 struct pending_extent_op *op;
545 struct extent_buffer *leaf;
546 int ret = 0;
547 struct list_head *cur = update_list->next;
548 u64 ref_objectid;
549 u64 ref_root = extent_root->root_key.objectid;
551 op = list_entry(cur, struct pending_extent_op, list);
553 search:
554 key.objectid = op->bytenr;
555 key.type = BTRFS_EXTENT_REF_KEY;
556 key.offset = op->orig_parent;
558 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1);
559 BUG_ON(ret);
561 leaf = path->nodes[0];
563 loop:
564 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
566 ref_objectid = btrfs_ref_objectid(leaf, ref);
568 if (btrfs_ref_root(leaf, ref) != ref_root ||
569 btrfs_ref_generation(leaf, ref) != op->orig_generation ||
570 (ref_objectid != op->level &&
571 ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
572 printk(KERN_ERR "btrfs couldn't find %llu, parent %llu, "
573 "root %llu, owner %u\n",
574 (unsigned long long)op->bytenr,
575 (unsigned long long)op->orig_parent,
576 (unsigned long long)ref_root, op->level);
577 btrfs_print_leaf(extent_root, leaf);
578 BUG();
581 key.objectid = op->bytenr;
582 key.offset = op->parent;
583 key.type = BTRFS_EXTENT_REF_KEY;
584 ret = btrfs_set_item_key_safe(trans, extent_root, path, &key);
585 BUG_ON(ret);
586 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
587 btrfs_set_ref_generation(leaf, ref, op->generation);
589 cur = cur->next;
591 list_del_init(&op->list);
592 unlock_extent(&info->extent_ins, op->bytenr,
593 op->bytenr + op->num_bytes - 1, GFP_NOFS);
594 kfree(op);
596 if (cur == update_list) {
597 btrfs_mark_buffer_dirty(path->nodes[0]);
598 btrfs_release_path(extent_root, path);
599 goto out;
602 op = list_entry(cur, struct pending_extent_op, list);
604 path->slots[0]++;
605 while (path->slots[0] < btrfs_header_nritems(leaf)) {
606 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
607 if (key.objectid == op->bytenr &&
608 key.type == BTRFS_EXTENT_REF_KEY)
609 goto loop;
610 path->slots[0]++;
613 btrfs_mark_buffer_dirty(path->nodes[0]);
614 btrfs_release_path(extent_root, path);
615 goto search;
617 out:
618 return 0;
621 static noinline int insert_extents(struct btrfs_trans_handle *trans,
622 struct btrfs_root *extent_root,
623 struct btrfs_path *path,
624 struct list_head *insert_list, int nr)
626 struct btrfs_key *keys;
627 u32 *data_size;
628 struct pending_extent_op *op;
629 struct extent_buffer *leaf;
630 struct list_head *cur = insert_list->next;
631 struct btrfs_fs_info *info = extent_root->fs_info;
632 u64 ref_root = extent_root->root_key.objectid;
633 int i = 0, last = 0, ret;
634 int total = nr * 2;
636 if (!nr)
637 return 0;
639 keys = kzalloc(total * sizeof(struct btrfs_key), GFP_NOFS);
640 if (!keys)
641 return -ENOMEM;
643 data_size = kzalloc(total * sizeof(u32), GFP_NOFS);
644 if (!data_size) {
645 kfree(keys);
646 return -ENOMEM;
649 list_for_each_entry(op, insert_list, list) {
650 keys[i].objectid = op->bytenr;
651 keys[i].offset = op->num_bytes;
652 keys[i].type = BTRFS_EXTENT_ITEM_KEY;
653 data_size[i] = sizeof(struct btrfs_extent_item);
654 i++;
656 keys[i].objectid = op->bytenr;
657 keys[i].offset = op->parent;
658 keys[i].type = BTRFS_EXTENT_REF_KEY;
659 data_size[i] = sizeof(struct btrfs_extent_ref);
660 i++;
663 op = list_entry(cur, struct pending_extent_op, list);
664 i = 0;
665 while (i < total) {
666 int c;
667 ret = btrfs_insert_some_items(trans, extent_root, path,
668 keys+i, data_size+i, total-i);
669 BUG_ON(ret < 0);
671 if (last && ret > 1)
672 BUG();
674 leaf = path->nodes[0];
675 for (c = 0; c < ret; c++) {
676 int ref_first = keys[i].type == BTRFS_EXTENT_REF_KEY;
679 * if the first item we inserted was a backref, then
680 * the EXTENT_ITEM will be the odd c's, else it will
681 * be the even c's
683 if ((ref_first && (c % 2)) ||
684 (!ref_first && !(c % 2))) {
685 struct btrfs_extent_item *itm;
687 itm = btrfs_item_ptr(leaf, path->slots[0] + c,
688 struct btrfs_extent_item);
689 btrfs_set_extent_refs(path->nodes[0], itm, 1);
690 op->del++;
691 } else {
692 struct btrfs_extent_ref *ref;
694 ref = btrfs_item_ptr(leaf, path->slots[0] + c,
695 struct btrfs_extent_ref);
696 btrfs_set_ref_root(leaf, ref, ref_root);
697 btrfs_set_ref_generation(leaf, ref,
698 op->generation);
699 btrfs_set_ref_objectid(leaf, ref, op->level);
700 btrfs_set_ref_num_refs(leaf, ref, 1);
701 op->del++;
705 * using del to see when it's OK to free up the
706 * pending_extent_op. In the case where we insert the
707 * last item on the list in order to help do batching
708 * we need to not free the extent op until we actually
709 * insert the extent_item
711 if (op->del == 2) {
712 unlock_extent(&info->extent_ins, op->bytenr,
713 op->bytenr + op->num_bytes - 1,
714 GFP_NOFS);
715 cur = cur->next;
716 list_del_init(&op->list);
717 kfree(op);
718 if (cur != insert_list)
719 op = list_entry(cur,
720 struct pending_extent_op,
721 list);
724 btrfs_mark_buffer_dirty(leaf);
725 btrfs_release_path(extent_root, path);
728 * OK, backrefs and items usually go right next to each other,
729 * but if we could only insert 1 item that means that we
730 * inserted on the end of a leaf, and we have no idea what may
731 * be on the next leaf so we just play it safe. In order to
732 * try and help this case we insert the last thing on our
733 * insert list so hopefully it will end up being the last
734 * thing on the leaf and everything else will be before it,
735 * which will let us insert a whole bunch of items at the same
736 * time.
738 if (ret == 1 && !last && (i + ret < total)) {
740 * last: where we will pick up the next time around
741 * i: our current key to insert, will be total - 1
742 * cur: the current op we are screwing with
743 * op: duh
745 last = i + ret;
746 i = total - 1;
747 cur = insert_list->prev;
748 op = list_entry(cur, struct pending_extent_op, list);
749 } else if (last) {
751 * ok we successfully inserted the last item on the
752 * list, let's reset everything
754 * i: our current key to insert, so where we left off
755 * last time
756 * last: done with this
757 * cur: the op we are messing with
758 * op: duh
759 * total: since we inserted the last key, we need to
760 * decrement total so we don't overflow
762 i = last;
763 last = 0;
764 total--;
765 if (i < total) {
766 cur = insert_list->next;
767 op = list_entry(cur, struct pending_extent_op,
768 list);
770 } else {
771 i += ret;
774 cond_resched();
776 ret = 0;
777 kfree(keys);
778 kfree(data_size);
779 return ret;
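/*
 * Layout of the keys[] array built by insert_extents() for nr == 2
 * pending inserts A and B (total == nr * 2); each extent item is
 * immediately followed by its backref so btrfs_insert_some_items()
 * can usually place the pair side by side in one leaf:
 *
 *	keys[0] = (A.bytenr, BTRFS_EXTENT_ITEM_KEY, A.num_bytes)
 *	keys[1] = (A.bytenr, BTRFS_EXTENT_REF_KEY,  A.parent)
 *	keys[2] = (B.bytenr, BTRFS_EXTENT_ITEM_KEY, B.num_bytes)
 *	keys[3] = (B.bytenr, BTRFS_EXTENT_REF_KEY,  B.parent)
 */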
782 static noinline int insert_extent_backref(struct btrfs_trans_handle *trans,
783 struct btrfs_root *root,
784 struct btrfs_path *path,
785 u64 bytenr, u64 parent,
786 u64 ref_root, u64 ref_generation,
787 u64 owner_objectid)
789 struct btrfs_key key;
790 struct extent_buffer *leaf;
791 struct btrfs_extent_ref *ref;
792 u32 num_refs;
793 int ret;
795 key.objectid = bytenr;
796 key.type = BTRFS_EXTENT_REF_KEY;
797 key.offset = parent;
799 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*ref));
800 if (ret == 0) {
801 leaf = path->nodes[0];
802 ref = btrfs_item_ptr(leaf, path->slots[0],
803 struct btrfs_extent_ref);
804 btrfs_set_ref_root(leaf, ref, ref_root);
805 btrfs_set_ref_generation(leaf, ref, ref_generation);
806 btrfs_set_ref_objectid(leaf, ref, owner_objectid);
807 btrfs_set_ref_num_refs(leaf, ref, 1);
808 } else if (ret == -EEXIST) {
809 u64 existing_owner;
810 BUG_ON(owner_objectid < BTRFS_FIRST_FREE_OBJECTID);
811 leaf = path->nodes[0];
812 ref = btrfs_item_ptr(leaf, path->slots[0],
813 struct btrfs_extent_ref);
814 if (btrfs_ref_root(leaf, ref) != ref_root ||
815 btrfs_ref_generation(leaf, ref) != ref_generation) {
816 ret = -EIO;
817 WARN_ON(1);
818 goto out;
821 num_refs = btrfs_ref_num_refs(leaf, ref);
822 BUG_ON(num_refs == 0);
823 btrfs_set_ref_num_refs(leaf, ref, num_refs + 1);
825 existing_owner = btrfs_ref_objectid(leaf, ref);
826 if (existing_owner != owner_objectid &&
827 existing_owner != BTRFS_MULTIPLE_OBJECTIDS) {
828 btrfs_set_ref_objectid(leaf, ref,
829 BTRFS_MULTIPLE_OBJECTIDS);
831 ret = 0;
832 } else {
833 goto out;
835 btrfs_mark_buffer_dirty(path->nodes[0]);
836 out:
837 btrfs_release_path(root, path);
838 return ret;
841 static noinline int remove_extent_backref(struct btrfs_trans_handle *trans,
842 struct btrfs_root *root,
843 struct btrfs_path *path)
845 struct extent_buffer *leaf;
846 struct btrfs_extent_ref *ref;
847 u32 num_refs;
848 int ret = 0;
850 leaf = path->nodes[0];
851 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
852 num_refs = btrfs_ref_num_refs(leaf, ref);
853 BUG_ON(num_refs == 0);
854 num_refs -= 1;
855 if (num_refs == 0) {
856 ret = btrfs_del_item(trans, root, path);
857 } else {
858 btrfs_set_ref_num_refs(leaf, ref, num_refs);
859 btrfs_mark_buffer_dirty(leaf);
861 btrfs_release_path(root, path);
862 return ret;
865 #ifdef BIO_RW_DISCARD
866 static void btrfs_issue_discard(struct block_device *bdev,
867 u64 start, u64 len)
869 blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
871 #endif
873 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
874 u64 num_bytes)
876 #ifdef BIO_RW_DISCARD
877 int ret;
878 u64 map_length = num_bytes;
879 struct btrfs_multi_bio *multi = NULL;
881 /* Tell the block device(s) that the sectors can be discarded */
882 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
883 bytenr, &map_length, &multi, 0);
884 if (!ret) {
885 struct btrfs_bio_stripe *stripe = multi->stripes;
886 int i;
888 if (map_length > num_bytes)
889 map_length = num_bytes;
891 for (i = 0; i < multi->num_stripes; i++, stripe++) {
892 btrfs_issue_discard(stripe->dev->bdev,
893 stripe->physical,
894 map_length);
896 kfree(multi);
899 return ret;
900 #else
901 return 0;
902 #endif
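/*
 * blkdev_issue_discard() works in 512-byte sectors, hence the >> 9
 * conversions above: a 128K extent at byte offset 1048576 becomes
 * sector 2048, length 256 sectors.  On kernels built without
 * BIO_RW_DISCARD the whole helper compiles down to "return 0".
 */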
905 static noinline int free_extents(struct btrfs_trans_handle *trans,
906 struct btrfs_root *extent_root,
907 struct list_head *del_list)
909 struct btrfs_fs_info *info = extent_root->fs_info;
910 struct btrfs_path *path;
911 struct btrfs_key key, found_key;
912 struct extent_buffer *leaf;
913 struct list_head *cur;
914 struct pending_extent_op *op;
915 struct btrfs_extent_item *ei;
916 int ret, num_to_del, extent_slot = 0, found_extent = 0;
917 u32 refs;
918 u64 bytes_freed = 0;
920 path = btrfs_alloc_path();
921 if (!path)
922 return -ENOMEM;
923 path->reada = 1;
925 search:
926 /* search for the backref for the current ref we want to delete */
927 cur = del_list->next;
928 op = list_entry(cur, struct pending_extent_op, list);
929 ret = lookup_extent_backref(trans, extent_root, path, op->bytenr,
930 op->orig_parent,
931 extent_root->root_key.objectid,
932 op->orig_generation, op->level, 1);
933 if (ret) {
934 printk(KERN_ERR "btrfs unable to find backref byte nr %llu "
935 "root %llu gen %llu owner %u\n",
936 (unsigned long long)op->bytenr,
937 (unsigned long long)extent_root->root_key.objectid,
938 (unsigned long long)op->orig_generation, op->level);
939 btrfs_print_leaf(extent_root, path->nodes[0]);
940 WARN_ON(1);
941 goto out;
944 extent_slot = path->slots[0];
945 num_to_del = 1;
946 found_extent = 0;
949 * if we aren't the first item on the leaf we can move back one and see
950 * if our ref is right next to our extent item
952 if (likely(extent_slot)) {
953 extent_slot--;
954 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
955 extent_slot);
956 if (found_key.objectid == op->bytenr &&
957 found_key.type == BTRFS_EXTENT_ITEM_KEY &&
958 found_key.offset == op->num_bytes) {
959 num_to_del++;
960 found_extent = 1;
965 * if we didn't find the extent we need to delete the backref and then
966 * search for the extent item key so we can update its ref count
968 if (!found_extent) {
969 key.objectid = op->bytenr;
970 key.type = BTRFS_EXTENT_ITEM_KEY;
971 key.offset = op->num_bytes;
973 ret = remove_extent_backref(trans, extent_root, path);
974 BUG_ON(ret);
975 btrfs_release_path(extent_root, path);
976 ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
977 BUG_ON(ret);
978 extent_slot = path->slots[0];
981 /* this is where we update the ref count for the extent */
982 leaf = path->nodes[0];
983 ei = btrfs_item_ptr(leaf, extent_slot, struct btrfs_extent_item);
984 refs = btrfs_extent_refs(leaf, ei);
985 BUG_ON(refs == 0);
986 refs--;
987 btrfs_set_extent_refs(leaf, ei, refs);
989 btrfs_mark_buffer_dirty(leaf);
992 * This extent needs deleting. The reason cur_slot is extent_slot +
993 * num_to_del is because extent_slot points to the slot where the extent
994 * is, and if the backref was not right next to the extent we will be
995 * deleting at least 1 item, and will want to start searching at the
996 * slot directly next to extent_slot. However if we did find the
997 * backref next to the extent item then we will be deleting at least 2
998 * items and will want to start searching directly after the ref slot
1000 if (!refs) {
1001 struct list_head *pos, *n, *end;
1002 int cur_slot = extent_slot+num_to_del;
1003 u64 super_used;
1004 u64 root_used;
1006 path->slots[0] = extent_slot;
1007 bytes_freed = op->num_bytes;
1009 mutex_lock(&info->pinned_mutex);
1010 ret = pin_down_bytes(trans, extent_root, op->bytenr,
1011 op->num_bytes, op->level >=
1012 BTRFS_FIRST_FREE_OBJECTID);
1013 mutex_unlock(&info->pinned_mutex);
1014 BUG_ON(ret < 0);
1015 op->del = ret;
1018 * we need to see if we can delete multiple things at once, so
1019 * start looping through the list of extents we are wanting to
1020 * delete and see if their extent/backref's are right next to
1021 * each other and the extents only have 1 ref
1023 for (pos = cur->next; pos != del_list; pos = pos->next) {
1024 struct pending_extent_op *tmp;
1026 tmp = list_entry(pos, struct pending_extent_op, list);
1028 /* we only want to delete extent+ref at this stage */
1029 if (cur_slot >= btrfs_header_nritems(leaf) - 1)
1030 break;
1032 btrfs_item_key_to_cpu(leaf, &found_key, cur_slot);
1033 if (found_key.objectid != tmp->bytenr ||
1034 found_key.type != BTRFS_EXTENT_ITEM_KEY ||
1035 found_key.offset != tmp->num_bytes)
1036 break;
1038 /* check to make sure this extent only has one ref */
1039 ei = btrfs_item_ptr(leaf, cur_slot,
1040 struct btrfs_extent_item);
1041 if (btrfs_extent_refs(leaf, ei) != 1)
1042 break;
1044 btrfs_item_key_to_cpu(leaf, &found_key, cur_slot+1);
1045 if (found_key.objectid != tmp->bytenr ||
1046 found_key.type != BTRFS_EXTENT_REF_KEY ||
1047 found_key.offset != tmp->orig_parent)
1048 break;
1051 * the ref is right next to the extent, we can set the
1052 * ref count to 0 since we will delete them both now
1054 btrfs_set_extent_refs(leaf, ei, 0);
1056 /* pin down the bytes for this extent */
1057 mutex_lock(&info->pinned_mutex);
1058 ret = pin_down_bytes(trans, extent_root, tmp->bytenr,
1059 tmp->num_bytes, tmp->level >=
1060 BTRFS_FIRST_FREE_OBJECTID);
1061 mutex_unlock(&info->pinned_mutex);
1062 BUG_ON(ret < 0);
1065 * use the del field to tell if we need to go ahead and
1066 * free up the extent when we delete the item or not.
1068 tmp->del = ret;
1069 bytes_freed += tmp->num_bytes;
1071 num_to_del += 2;
1072 cur_slot += 2;
1074 end = pos;
1076 /* update the free space counters */
1077 spin_lock(&info->delalloc_lock);
1078 super_used = btrfs_super_bytes_used(&info->super_copy);
1079 btrfs_set_super_bytes_used(&info->super_copy,
1080 super_used - bytes_freed);
1082 root_used = btrfs_root_used(&extent_root->root_item);
1083 btrfs_set_root_used(&extent_root->root_item,
1084 root_used - bytes_freed);
1085 spin_unlock(&info->delalloc_lock);
1087 /* delete the items */
1088 ret = btrfs_del_items(trans, extent_root, path,
1089 path->slots[0], num_to_del);
1090 BUG_ON(ret);
1093 * loop through the extents we deleted and do the cleanup work
1094 * on them
1096 for (pos = cur, n = pos->next; pos != end;
1097 pos = n, n = pos->next) {
1098 struct pending_extent_op *tmp;
1099 tmp = list_entry(pos, struct pending_extent_op, list);
1102 * remember tmp->del tells us whether or not we pinned
1103 * down the extent
1105 ret = update_block_group(trans, extent_root,
1106 tmp->bytenr, tmp->num_bytes, 0,
1107 tmp->del);
1108 BUG_ON(ret);
1110 list_del_init(&tmp->list);
1111 unlock_extent(&info->extent_ins, tmp->bytenr,
1112 tmp->bytenr + tmp->num_bytes - 1,
1113 GFP_NOFS);
1114 kfree(tmp);
1116 } else if (refs && found_extent) {
1118 * the ref and extent were right next to each other, but the
1119 * extent still has a ref, so just free the backref and keep
1120 * going
1122 ret = remove_extent_backref(trans, extent_root, path);
1123 BUG_ON(ret);
1125 list_del_init(&op->list);
1126 unlock_extent(&info->extent_ins, op->bytenr,
1127 op->bytenr + op->num_bytes - 1, GFP_NOFS);
1128 kfree(op);
1129 } else {
1131 * the extent has multiple refs and the backref we were looking
1132 * for was not right next to it, so just unlock and go next,
1133 * we're good to go
1135 list_del_init(&op->list);
1136 unlock_extent(&info->extent_ins, op->bytenr,
1137 op->bytenr + op->num_bytes - 1, GFP_NOFS);
1138 kfree(op);
1141 btrfs_release_path(extent_root, path);
1142 if (!list_empty(del_list))
1143 goto search;
1145 out:
1146 btrfs_free_path(path);
1147 return ret;
1150 static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
1151 struct btrfs_root *root, u64 bytenr,
1152 u64 orig_parent, u64 parent,
1153 u64 orig_root, u64 ref_root,
1154 u64 orig_generation, u64 ref_generation,
1155 u64 owner_objectid)
1157 int ret;
1158 struct btrfs_root *extent_root = root->fs_info->extent_root;
1159 struct btrfs_path *path;
1161 if (root == root->fs_info->extent_root) {
1162 struct pending_extent_op *extent_op;
1163 u64 num_bytes;
1165 BUG_ON(owner_objectid >= BTRFS_MAX_LEVEL);
1166 num_bytes = btrfs_level_size(root, (int)owner_objectid);
1167 mutex_lock(&root->fs_info->extent_ins_mutex);
1168 if (test_range_bit(&root->fs_info->extent_ins, bytenr,
1169 bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
1170 u64 priv;
1171 ret = get_state_private(&root->fs_info->extent_ins,
1172 bytenr, &priv);
1173 BUG_ON(ret);
1174 extent_op = (struct pending_extent_op *)
1175 (unsigned long)priv;
1176 BUG_ON(extent_op->parent != orig_parent);
1177 BUG_ON(extent_op->generation != orig_generation);
1179 extent_op->parent = parent;
1180 extent_op->generation = ref_generation;
1181 } else {
1182 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
1183 BUG_ON(!extent_op);
1185 extent_op->type = PENDING_BACKREF_UPDATE;
1186 extent_op->bytenr = bytenr;
1187 extent_op->num_bytes = num_bytes;
1188 extent_op->parent = parent;
1189 extent_op->orig_parent = orig_parent;
1190 extent_op->generation = ref_generation;
1191 extent_op->orig_generation = orig_generation;
1192 extent_op->level = (int)owner_objectid;
1193 INIT_LIST_HEAD(&extent_op->list);
1194 extent_op->del = 0;
1196 set_extent_bits(&root->fs_info->extent_ins,
1197 bytenr, bytenr + num_bytes - 1,
1198 EXTENT_WRITEBACK, GFP_NOFS);
1199 set_state_private(&root->fs_info->extent_ins,
1200 bytenr, (unsigned long)extent_op);
1202 mutex_unlock(&root->fs_info->extent_ins_mutex);
1203 return 0;
1206 path = btrfs_alloc_path();
1207 if (!path)
1208 return -ENOMEM;
1209 ret = lookup_extent_backref(trans, extent_root, path,
1210 bytenr, orig_parent, orig_root,
1211 orig_generation, owner_objectid, 1);
1212 if (ret)
1213 goto out;
1214 ret = remove_extent_backref(trans, extent_root, path);
1215 if (ret)
1216 goto out;
1217 ret = insert_extent_backref(trans, extent_root, path, bytenr,
1218 parent, ref_root, ref_generation,
1219 owner_objectid);
1220 BUG_ON(ret);
1221 finish_current_insert(trans, extent_root, 0);
1222 del_pending_extents(trans, extent_root, 0);
1223 out:
1224 btrfs_free_path(path);
1225 return ret;
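/*
 * A minimal sketch (hypothetical helper, locking elided) of how an op
 * deferred above is found again: the range is tagged EXTENT_WRITEBACK
 * in the extent_ins tree and the pending_extent_op pointer is stashed
 * as that range's private data, exactly as update_backrefs() and
 * finish_current_insert() later consume it.
 */
static struct pending_extent_op *
peek_pending_op(struct btrfs_fs_info *info, u64 bytenr)
{
	u64 priv;

	if (get_state_private(&info->extent_ins, bytenr, &priv))
		return NULL;	/* nothing queued at this bytenr */
	return (struct pending_extent_op *)(unsigned long)priv;
}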
1228 int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
1229 struct btrfs_root *root, u64 bytenr,
1230 u64 orig_parent, u64 parent,
1231 u64 ref_root, u64 ref_generation,
1232 u64 owner_objectid)
1234 int ret;
1235 if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
1236 owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
1237 return 0;
1238 ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_parent,
1239 parent, ref_root, ref_root,
1240 ref_generation, ref_generation,
1241 owner_objectid);
1242 return ret;
1245 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1246 struct btrfs_root *root, u64 bytenr,
1247 u64 orig_parent, u64 parent,
1248 u64 orig_root, u64 ref_root,
1249 u64 orig_generation, u64 ref_generation,
1250 u64 owner_objectid)
1252 struct btrfs_path *path;
1253 int ret;
1254 struct btrfs_key key;
1255 struct extent_buffer *l;
1256 struct btrfs_extent_item *item;
1257 u32 refs;
1259 path = btrfs_alloc_path();
1260 if (!path)
1261 return -ENOMEM;
1263 path->reada = 1;
1264 key.objectid = bytenr;
1265 key.type = BTRFS_EXTENT_ITEM_KEY;
1266 key.offset = (u64)-1;
1268 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
1269 0, 1);
1270 if (ret < 0)
1271 return ret;
1272 BUG_ON(ret == 0 || path->slots[0] == 0);
1274 path->slots[0]--;
1275 l = path->nodes[0];
1277 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1278 if (key.objectid != bytenr) {
1279 btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]);
1280 printk(KERN_ERR "btrfs wanted %llu found %llu\n",
1281 (unsigned long long)bytenr,
1282 (unsigned long long)key.objectid);
1283 BUG();
1285 BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);
1287 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
1288 refs = btrfs_extent_refs(l, item);
1289 btrfs_set_extent_refs(l, item, refs + 1);
1290 btrfs_mark_buffer_dirty(path->nodes[0]);
1292 btrfs_release_path(root->fs_info->extent_root, path);
1294 path->reada = 1;
1295 ret = insert_extent_backref(trans, root->fs_info->extent_root,
1296 path, bytenr, parent,
1297 ref_root, ref_generation,
1298 owner_objectid);
1299 BUG_ON(ret);
1300 finish_current_insert(trans, root->fs_info->extent_root, 0);
1301 del_pending_extents(trans, root->fs_info->extent_root, 0);
1303 btrfs_free_path(path);
1304 return 0;
1307 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1308 struct btrfs_root *root,
1309 u64 bytenr, u64 num_bytes, u64 parent,
1310 u64 ref_root, u64 ref_generation,
1311 u64 owner_objectid)
1313 int ret;
1314 if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
1315 owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
1316 return 0;
1317 ret = __btrfs_inc_extent_ref(trans, root, bytenr, 0, parent,
1318 0, ref_root, 0, ref_generation,
1319 owner_objectid);
1320 return ret;
1323 int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
1324 struct btrfs_root *root)
1326 u64 start;
1327 u64 end;
1328 int ret;
1330 while (1) {
1331 finish_current_insert(trans, root->fs_info->extent_root, 1);
1332 del_pending_extents(trans, root->fs_info->extent_root, 1);
1334 /* is there more work to do? */
1335 ret = find_first_extent_bit(&root->fs_info->pending_del,
1336 0, &start, &end, EXTENT_WRITEBACK);
1337 if (!ret)
1338 continue;
1339 ret = find_first_extent_bit(&root->fs_info->extent_ins,
1340 0, &start, &end, EXTENT_WRITEBACK);
1341 if (!ret)
1342 continue;
1343 break;
1345 return 0;
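/*
 * The loop above is the behavior named in the patch title: because
 * finish_current_insert() can queue new pending deletes and
 * del_pending_extents() can queue new pending inserts, one pass is not
 * enough.  The function only returns once find_first_extent_bit()
 * reports no EXTENT_WRITEBACK ranges left in either the pending_del or
 * the extent_ins tree, i.e. all pending extent operations are complete.
 */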
1348 int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
1349 struct btrfs_root *root, u64 bytenr,
1350 u64 num_bytes, u32 *refs)
1352 struct btrfs_path *path;
1353 int ret;
1354 struct btrfs_key key;
1355 struct extent_buffer *l;
1356 struct btrfs_extent_item *item;
1358 WARN_ON(num_bytes < root->sectorsize);
1359 path = btrfs_alloc_path();
1360 path->reada = 1;
1361 key.objectid = bytenr;
1362 key.offset = num_bytes;
1363 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
1364 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
1365 0, 0);
1366 if (ret < 0)
1367 goto out;
1368 if (ret != 0) {
1369 btrfs_print_leaf(root, path->nodes[0]);
1370 printk(KERN_INFO "btrfs failed to find block number %llu\n",
1371 (unsigned long long)bytenr);
1372 BUG();
1374 l = path->nodes[0];
1375 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
1376 *refs = btrfs_extent_refs(l, item);
1377 out:
1378 btrfs_free_path(path);
1379 return 0;
1382 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
1383 struct btrfs_root *root, u64 objectid, u64 bytenr)
1385 struct btrfs_root *extent_root = root->fs_info->extent_root;
1386 struct btrfs_path *path;
1387 struct extent_buffer *leaf;
1388 struct btrfs_extent_ref *ref_item;
1389 struct btrfs_key key;
1390 struct btrfs_key found_key;
1391 u64 ref_root;
1392 u64 last_snapshot;
1393 u32 nritems;
1394 int ret;
1396 key.objectid = bytenr;
1397 key.offset = (u64)-1;
1398 key.type = BTRFS_EXTENT_ITEM_KEY;
1400 path = btrfs_alloc_path();
1401 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1402 if (ret < 0)
1403 goto out;
1404 BUG_ON(ret == 0);
1406 ret = -ENOENT;
1407 if (path->slots[0] == 0)
1408 goto out;
1410 path->slots[0]--;
1411 leaf = path->nodes[0];
1412 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1414 if (found_key.objectid != bytenr ||
1415 found_key.type != BTRFS_EXTENT_ITEM_KEY)
1416 goto out;
1418 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1419 while (1) {
1420 leaf = path->nodes[0];
1421 nritems = btrfs_header_nritems(leaf);
1422 if (path->slots[0] >= nritems) {
1423 ret = btrfs_next_leaf(extent_root, path);
1424 if (ret < 0)
1425 goto out;
1426 if (ret == 0)
1427 continue;
1428 break;
1430 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1431 if (found_key.objectid != bytenr)
1432 break;
1434 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
1435 path->slots[0]++;
1436 continue;
1439 ref_item = btrfs_item_ptr(leaf, path->slots[0],
1440 struct btrfs_extent_ref);
1441 ref_root = btrfs_ref_root(leaf, ref_item);
1442 if ((ref_root != root->root_key.objectid &&
1443 ref_root != BTRFS_TREE_LOG_OBJECTID) ||
1444 objectid != btrfs_ref_objectid(leaf, ref_item)) {
1445 ret = 1;
1446 goto out;
1448 if (btrfs_ref_generation(leaf, ref_item) <= last_snapshot) {
1449 ret = 1;
1450 goto out;
1453 path->slots[0]++;
1455 ret = 0;
1456 out:
1457 btrfs_free_path(path);
1458 return ret;
1461 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1462 struct extent_buffer *buf, u32 nr_extents)
1464 struct btrfs_key key;
1465 struct btrfs_file_extent_item *fi;
1466 u64 root_gen;
1467 u32 nritems;
1468 int i;
1469 int level;
1470 int ret = 0;
1471 int shared = 0;
1473 if (!root->ref_cows)
1474 return 0;
1476 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1477 shared = 0;
1478 root_gen = root->root_key.offset;
1479 } else {
1480 shared = 1;
1481 root_gen = trans->transid - 1;
1484 level = btrfs_header_level(buf);
1485 nritems = btrfs_header_nritems(buf);
1487 if (level == 0) {
1488 struct btrfs_leaf_ref *ref;
1489 struct btrfs_extent_info *info;
1491 ref = btrfs_alloc_leaf_ref(root, nr_extents);
1492 if (!ref) {
1493 ret = -ENOMEM;
1494 goto out;
1497 ref->root_gen = root_gen;
1498 ref->bytenr = buf->start;
1499 ref->owner = btrfs_header_owner(buf);
1500 ref->generation = btrfs_header_generation(buf);
1501 ref->nritems = nr_extents;
1502 info = ref->extents;
1504 for (i = 0; nr_extents > 0 && i < nritems; i++) {
1505 u64 disk_bytenr;
1506 btrfs_item_key_to_cpu(buf, &key, i);
1507 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1508 continue;
1509 fi = btrfs_item_ptr(buf, i,
1510 struct btrfs_file_extent_item);
1511 if (btrfs_file_extent_type(buf, fi) ==
1512 BTRFS_FILE_EXTENT_INLINE)
1513 continue;
1514 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1515 if (disk_bytenr == 0)
1516 continue;
1518 info->bytenr = disk_bytenr;
1519 info->num_bytes =
1520 btrfs_file_extent_disk_num_bytes(buf, fi);
1521 info->objectid = key.objectid;
1522 info->offset = key.offset;
1523 info++;
1526 ret = btrfs_add_leaf_ref(root, ref, shared);
1527 if (ret == -EEXIST && shared) {
1528 struct btrfs_leaf_ref *old;
1529 old = btrfs_lookup_leaf_ref(root, ref->bytenr);
1530 BUG_ON(!old);
1531 btrfs_remove_leaf_ref(root, old);
1532 btrfs_free_leaf_ref(root, old);
1533 ret = btrfs_add_leaf_ref(root, ref, shared);
1535 WARN_ON(ret);
1536 btrfs_free_leaf_ref(root, ref);
1538 out:
1539 return ret;
1542 /* when a block goes through cow, we update the reference counts of
1543 * everything that block points to. The internal pointers of the block
1544 * can be in just about any order, and it is likely to have clusters of
1545 * things that are close together and clusters of things that are not.
1547 * To help reduce the seeks that come with updating all of these reference
1548 * counts, sort them by byte number before actual updates are done.
1550 * struct refsort is used to match byte number to slot in the btree block.
1551 * we sort based on the byte number and then use the slot to actually
1552 * find the item.
1554 * struct refsort is smaller than struct btrfs_item and smaller than
1555 * struct btrfs_key_ptr. Since we're currently limited to the page size
1556 * for a btree block, there's no way for a kmalloc of refsorts for a
1557 * single node to be bigger than a page.
1559 struct refsort {
1560 u64 bytenr;
1561 u32 slot;
1565 * for passing into sort()
1567 static int refsort_cmp(const void *a_void, const void *b_void)
1569 const struct refsort *a = a_void;
1570 const struct refsort *b = b_void;
1572 if (a->bytenr < b->bytenr)
1573 return -1;
1574 if (a->bytenr > b->bytenr)
1575 return 1;
1576 return 0;
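/*
 * A minimal sketch of the sort pass described above (hypothetical
 * caller; sort() comes from the linux/sort.h already included at the
 * top of this file):
 */
static void sort_refs_by_bytenr(struct refsort *sorted, u32 refi)
{
	/*
	 * order the recorded (bytenr, slot) pairs by disk byte so the
	 * per-ref work in btrfs_inc_ref() walks the extent tree mostly
	 * forward instead of seeking randomly
	 */
	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
}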
1580 noinline int btrfs_inc_ref(struct btrfs_trans_handle *trans,
1581 struct btrfs_root *root,
1582 struct extent_buffer *orig_buf,
1583 struct extent_buffer *buf, u32 *nr_extents)
1585 u64 bytenr;
1586 u64 ref_root;
1587 u64 orig_root;
1588 u64 ref_generation;
1589 u64 orig_generation;
1590 struct refsort *sorted;
1591 u32 nritems;
1592 u32 nr_file_extents = 0;
1593 struct btrfs_key key;
1594 struct btrfs_file_extent_item *fi;
1595 int i;
1596 int level;
1597 int ret = 0;
1598 int faili = 0;
1599 int refi = 0;
1600 int slot;
1601 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
1602 u64, u64, u64, u64, u64, u64, u64, u64);
1604 ref_root = btrfs_header_owner(buf);
1605 ref_generation = btrfs_header_generation(buf);
1606 orig_root = btrfs_header_owner(orig_buf);
1607 orig_generation = btrfs_header_generation(orig_buf);
1609 nritems = btrfs_header_nritems(buf);
1610 level = btrfs_header_level(buf);
1612 sorted = kmalloc(sizeof(struct refsort) * nritems, GFP_NOFS);
1613 BUG_ON(!sorted);
1615 if (root->ref_cows) {
1616 process_func = __btrfs_inc_extent_ref;
1617 } else {
1618 if (level == 0 &&
1619 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1620 goto out;
1621 if (level != 0 &&
1622 root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1623 goto out;
1624 process_func = __btrfs_update_extent_ref;
1628 * we make two passes through the items. In the first pass we
1629 * only record the byte number and slot. Then we sort based on
1630 * byte number and do the actual work based on the sorted results
1632 for (i = 0; i < nritems; i++) {
1633 cond_resched();
1634 if (level == 0) {
1635 btrfs_item_key_to_cpu(buf, &key, i);
1636 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1637 continue;
1638 fi = btrfs_item_ptr(buf, i,
1639 struct btrfs_file_extent_item);
1640 if (btrfs_file_extent_type(buf, fi) ==
1641 BTRFS_FILE_EXTENT_INLINE)
1642 continue;
1643 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1644 if (bytenr == 0)
1645 continue;
1647 nr_file_extents++;
1648 sorted[refi].bytenr = bytenr;
1649 sorted[refi].slot = i;
1650 refi++;
1651 } else {
1652 bytenr = btrfs_node_blockptr(buf, i);
1653 sorted[refi].bytenr = bytenr;
1654 sorted[refi].slot = i;
1655 refi++;
1659 * if refi == 0, we didn't actually put anything into the sorted
1660 * array and we're done
1662 if (refi == 0)
1663 goto out;
1665 sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
1667 for (i = 0; i < refi; i++) {
1668 cond_resched();
1669 slot = sorted[i].slot;
1670 bytenr = sorted[i].bytenr;
1672 if (level == 0) {
1673 btrfs_item_key_to_cpu(buf, &key, slot);
1675 ret = process_func(trans, root, bytenr,
1676 orig_buf->start, buf->start,
1677 orig_root, ref_root,
1678 orig_generation, ref_generation,
1679 key.objectid);
1681 if (ret) {
1682 faili = slot;
1683 WARN_ON(1);
1684 goto fail;
1686 } else {
1687 ret = process_func(trans, root, bytenr,
1688 orig_buf->start, buf->start,
1689 orig_root, ref_root,
1690 orig_generation, ref_generation,
1691 level - 1);
1692 if (ret) {
1693 faili = slot;
1694 WARN_ON(1);
1695 goto fail;
1699 out:
1700 kfree(sorted);
1701 if (nr_extents) {
1702 if (level == 0)
1703 *nr_extents = nr_file_extents;
1704 else
1705 *nr_extents = nritems;
1707 return 0;
1708 fail:
1709 kfree(sorted);
1710 WARN_ON(1);
1711 return ret;
1714 int btrfs_update_ref(struct btrfs_trans_handle *trans,
1715 struct btrfs_root *root, struct extent_buffer *orig_buf,
1716 struct extent_buffer *buf, int start_slot, int nr)
1719 u64 bytenr;
1720 u64 ref_root;
1721 u64 orig_root;
1722 u64 ref_generation;
1723 u64 orig_generation;
1724 struct btrfs_key key;
1725 struct btrfs_file_extent_item *fi;
1726 int i;
1727 int ret;
1728 int slot;
1729 int level;
1731 BUG_ON(start_slot < 0);
1732 BUG_ON(start_slot + nr > btrfs_header_nritems(buf));
1734 ref_root = btrfs_header_owner(buf);
1735 ref_generation = btrfs_header_generation(buf);
1736 orig_root = btrfs_header_owner(orig_buf);
1737 orig_generation = btrfs_header_generation(orig_buf);
1738 level = btrfs_header_level(buf);
1740 if (!root->ref_cows) {
1741 if (level == 0 &&
1742 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1743 return 0;
1744 if (level != 0 &&
1745 root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1746 return 0;
1749 for (i = 0, slot = start_slot; i < nr; i++, slot++) {
1750 cond_resched();
1751 if (level == 0) {
1752 btrfs_item_key_to_cpu(buf, &key, slot);
1753 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1754 continue;
1755 fi = btrfs_item_ptr(buf, slot,
1756 struct btrfs_file_extent_item);
1757 if (btrfs_file_extent_type(buf, fi) ==
1758 BTRFS_FILE_EXTENT_INLINE)
1759 continue;
1760 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1761 if (bytenr == 0)
1762 continue;
1763 ret = __btrfs_update_extent_ref(trans, root, bytenr,
1764 orig_buf->start, buf->start,
1765 orig_root, ref_root,
1766 orig_generation, ref_generation,
1767 key.objectid);
1768 if (ret)
1769 goto fail;
1770 } else {
1771 bytenr = btrfs_node_blockptr(buf, slot);
1772 ret = __btrfs_update_extent_ref(trans, root, bytenr,
1773 orig_buf->start, buf->start,
1774 orig_root, ref_root,
1775 orig_generation, ref_generation,
1776 level - 1);
1777 if (ret)
1778 goto fail;
1781 return 0;
1782 fail:
1783 WARN_ON(1);
1784 return -1;
1787 static int write_one_cache_group(struct btrfs_trans_handle *trans,
1788 struct btrfs_root *root,
1789 struct btrfs_path *path,
1790 struct btrfs_block_group_cache *cache)
1792 int ret;
1793 int pending_ret;
1794 struct btrfs_root *extent_root = root->fs_info->extent_root;
1795 unsigned long bi;
1796 struct extent_buffer *leaf;
1798 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
1799 if (ret < 0)
1800 goto fail;
1801 BUG_ON(ret);
1803 leaf = path->nodes[0];
1804 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
1805 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
1806 btrfs_mark_buffer_dirty(leaf);
1807 btrfs_release_path(extent_root, path);
1808 fail:
1809 finish_current_insert(trans, extent_root, 0);
1810 pending_ret = del_pending_extents(trans, extent_root, 0);
1811 if (ret)
1812 return ret;
1813 if (pending_ret)
1814 return pending_ret;
1815 return 0;
1819 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
1820 struct btrfs_root *root)
1822 struct btrfs_block_group_cache *cache, *entry;
1823 struct rb_node *n;
1824 int err = 0;
1825 int werr = 0;
1826 struct btrfs_path *path;
1827 u64 last = 0;
1829 path = btrfs_alloc_path();
1830 if (!path)
1831 return -ENOMEM;
1833 while (1) {
1834 cache = NULL;
1835 spin_lock(&root->fs_info->block_group_cache_lock);
1836 for (n = rb_first(&root->fs_info->block_group_cache_tree);
1837 n; n = rb_next(n)) {
1838 entry = rb_entry(n, struct btrfs_block_group_cache,
1839 cache_node);
1840 if (entry->dirty) {
1841 cache = entry;
1842 break;
1845 spin_unlock(&root->fs_info->block_group_cache_lock);
1847 if (!cache)
1848 break;
1850 cache->dirty = 0;
1851 last += cache->key.offset;
1853 err = write_one_cache_group(trans, root,
1854 path, cache);
1856 * if we fail to write the cache group, we want
1857 * to keep it marked dirty in hopes that a later
1858 * write will work
1860 if (err) {
1861 werr = err;
1862 continue;
1865 btrfs_free_path(path);
1866 return werr;
1869 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
1871 struct btrfs_block_group_cache *block_group;
1872 int readonly = 0;
1874 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
1875 if (!block_group || block_group->ro)
1876 readonly = 1;
1877 if (block_group)
1878 put_block_group(block_group);
1879 return readonly;
1882 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
1883 u64 total_bytes, u64 bytes_used,
1884 struct btrfs_space_info **space_info)
1886 struct btrfs_space_info *found;
1888 found = __find_space_info(info, flags);
1889 if (found) {
1890 spin_lock(&found->lock);
1891 found->total_bytes += total_bytes;
1892 found->bytes_used += bytes_used;
1893 found->full = 0;
1894 spin_unlock(&found->lock);
1895 *space_info = found;
1896 return 0;
1898 found = kzalloc(sizeof(*found), GFP_NOFS);
1899 if (!found)
1900 return -ENOMEM;
1902 list_add(&found->list, &info->space_info);
1903 INIT_LIST_HEAD(&found->block_groups);
1904 init_rwsem(&found->groups_sem);
1905 spin_lock_init(&found->lock);
1906 found->flags = flags;
1907 found->total_bytes = total_bytes;
1908 found->bytes_used = bytes_used;
1909 found->bytes_pinned = 0;
1910 found->bytes_reserved = 0;
1911 found->bytes_readonly = 0;
1912 found->full = 0;
1913 found->force_alloc = 0;
1914 *space_info = found;
1915 return 0;
1918 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1920 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
1921 BTRFS_BLOCK_GROUP_RAID1 |
1922 BTRFS_BLOCK_GROUP_RAID10 |
1923 BTRFS_BLOCK_GROUP_DUP);
1924 if (extra_flags) {
1925 if (flags & BTRFS_BLOCK_GROUP_DATA)
1926 fs_info->avail_data_alloc_bits |= extra_flags;
1927 if (flags & BTRFS_BLOCK_GROUP_METADATA)
1928 fs_info->avail_metadata_alloc_bits |= extra_flags;
1929 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1930 fs_info->avail_system_alloc_bits |= extra_flags;
1934 static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
1936 spin_lock(&cache->space_info->lock);
1937 spin_lock(&cache->lock);
1938 if (!cache->ro) {
1939 cache->space_info->bytes_readonly += cache->key.offset -
1940 btrfs_block_group_used(&cache->item);
1941 cache->ro = 1;
1943 spin_unlock(&cache->lock);
1944 spin_unlock(&cache->space_info->lock);
1947 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
1949 u64 num_devices = root->fs_info->fs_devices->rw_devices;
1951 if (num_devices == 1)
1952 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
1953 if (num_devices < 4)
1954 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
1956 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
1957 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
1958 BTRFS_BLOCK_GROUP_RAID10))) {
1959 flags &= ~BTRFS_BLOCK_GROUP_DUP;
1962 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
1963 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
1964 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
1967 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
1968 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
1969 (flags & BTRFS_BLOCK_GROUP_RAID10) |
1970 (flags & BTRFS_BLOCK_GROUP_DUP)))
1971 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
1972 return flags;
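/*
 * Reduction examples (requested profiles are illustrative): with a
 * single rw device, DATA | RAID1 | RAID0 collapses to plain DATA;
 * with fewer than four devices RAID10 is stripped; and DUP combined
 * with RAID1 or RAID10 drops DUP in favor of the mirrored profile.
 */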
1975 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
1976 struct btrfs_root *extent_root, u64 alloc_bytes,
1977 u64 flags, int force)
1979 struct btrfs_space_info *space_info;
1980 u64 thresh;
1981 int ret = 0;
1983 mutex_lock(&extent_root->fs_info->chunk_mutex);
1985 flags = btrfs_reduce_alloc_profile(extent_root, flags);
1987 space_info = __find_space_info(extent_root->fs_info, flags);
1988 if (!space_info) {
1989 ret = update_space_info(extent_root->fs_info, flags,
1990 0, 0, &space_info);
1991 BUG_ON(ret);
1993 BUG_ON(!space_info);
1995 spin_lock(&space_info->lock);
1996 if (space_info->force_alloc) {
1997 force = 1;
1998 space_info->force_alloc = 0;
2000 if (space_info->full) {
2001 spin_unlock(&space_info->lock);
2002 goto out;
2005 thresh = space_info->total_bytes - space_info->bytes_readonly;
2006 thresh = div_factor(thresh, 6);
2007 if (!force &&
2008 (space_info->bytes_used + space_info->bytes_pinned +
2009 space_info->bytes_reserved + alloc_bytes) < thresh) {
2010 spin_unlock(&space_info->lock);
2011 goto out;
2013 spin_unlock(&space_info->lock);
2015 ret = btrfs_alloc_chunk(trans, extent_root, flags);
2016 if (ret)
2017 space_info->full = 1;
2018 out:
2019 mutex_unlock(&extent_root->fs_info->chunk_mutex);
2020 return ret;
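/*
 * Worked example of the threshold above (sizes are illustrative):
 * with 10 GiB of total space for this profile and 1 GiB of it
 * read-only, thresh = div_factor(9 GiB, 6) is roughly 5.4 GiB, so a
 * new chunk is allocated only once used + pinned + reserved plus the
 * requested alloc_bytes reaches that 60% mark, or when force_alloc
 * was set.
 */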
2023 static int update_block_group(struct btrfs_trans_handle *trans,
2024 struct btrfs_root *root,
2025 u64 bytenr, u64 num_bytes, int alloc,
2026 int mark_free)
2028 struct btrfs_block_group_cache *cache;
2029 struct btrfs_fs_info *info = root->fs_info;
2030 u64 total = num_bytes;
2031 u64 old_val;
2032 u64 byte_in_group;
2034 while (total) {
2035 cache = btrfs_lookup_block_group(info, bytenr);
2036 if (!cache)
2037 return -1;
2038 byte_in_group = bytenr - cache->key.objectid;
2039 WARN_ON(byte_in_group > cache->key.offset);
2041 spin_lock(&cache->space_info->lock);
2042 spin_lock(&cache->lock);
2043 cache->dirty = 1;
2044 old_val = btrfs_block_group_used(&cache->item);
2045 num_bytes = min(total, cache->key.offset - byte_in_group);
2046 if (alloc) {
2047 old_val += num_bytes;
2048 cache->space_info->bytes_used += num_bytes;
2049 if (cache->ro)
2050 cache->space_info->bytes_readonly -= num_bytes;
2051 btrfs_set_block_group_used(&cache->item, old_val);
2052 spin_unlock(&cache->lock);
2053 spin_unlock(&cache->space_info->lock);
2054 } else {
2055 old_val -= num_bytes;
2056 cache->space_info->bytes_used -= num_bytes;
2057 if (cache->ro)
2058 cache->space_info->bytes_readonly += num_bytes;
2059 btrfs_set_block_group_used(&cache->item, old_val);
2060 spin_unlock(&cache->lock);
2061 spin_unlock(&cache->space_info->lock);
2062 if (mark_free) {
2063 int ret;
2065 ret = btrfs_discard_extent(root, bytenr,
2066 num_bytes);
2067 WARN_ON(ret);
2069 ret = btrfs_add_free_space(cache, bytenr,
2070 num_bytes);
2071 WARN_ON(ret);
2074 put_block_group(cache);
2075 total -= num_bytes;
2076 bytenr += num_bytes;
2078 return 0;
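/*
 * illustrative sketch (not part of the original file): the while (total)
 * loop exists because [bytenr, bytenr + num_bytes) may straddle a block
 * group boundary; each pass clamps num_bytes to the current group:
 */
#if 0
	/* groups [0, 1GB) and [1GB, 2GB); freeing 8MB starting 4MB
	 * before the boundary:
	 *   pass 1: group 0, num_bytes clamped to 4MB
	 *   pass 2: group 1, the remaining 4MB accounted there
	 */
#endif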
2081 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
2083 struct btrfs_block_group_cache *cache;
2084 u64 bytenr;
2086 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
2087 if (!cache)
2088 return 0;
2090 bytenr = cache->key.objectid;
2091 put_block_group(cache);
2093 return bytenr;
2096 int btrfs_update_pinned_extents(struct btrfs_root *root,
2097 u64 bytenr, u64 num, int pin)
2099 u64 len;
2100 struct btrfs_block_group_cache *cache;
2101 struct btrfs_fs_info *fs_info = root->fs_info;
2103 WARN_ON(!mutex_is_locked(&root->fs_info->pinned_mutex));
2104 if (pin) {
2105 set_extent_dirty(&fs_info->pinned_extents,
2106 bytenr, bytenr + num - 1, GFP_NOFS);
2107 } else {
2108 clear_extent_dirty(&fs_info->pinned_extents,
2109 bytenr, bytenr + num - 1, GFP_NOFS);
2111 while (num > 0) {
2112 cache = btrfs_lookup_block_group(fs_info, bytenr);
2113 BUG_ON(!cache);
2114 len = min(num, cache->key.offset -
2115 (bytenr - cache->key.objectid));
2116 if (pin) {
2117 spin_lock(&cache->space_info->lock);
2118 spin_lock(&cache->lock);
2119 cache->pinned += len;
2120 cache->space_info->bytes_pinned += len;
2121 spin_unlock(&cache->lock);
2122 spin_unlock(&cache->space_info->lock);
2123 fs_info->total_pinned += len;
2124 } else {
2125 spin_lock(&cache->space_info->lock);
2126 spin_lock(&cache->lock);
2127 cache->pinned -= len;
2128 cache->space_info->bytes_pinned -= len;
2129 spin_unlock(&cache->lock);
2130 spin_unlock(&cache->space_info->lock);
2131 fs_info->total_pinned -= len;
2132 if (cache->cached)
2133 btrfs_add_free_space(cache, bytenr, len);
2135 put_block_group(cache);
2136 bytenr += len;
2137 num -= len;
2139 return 0;
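/*
 * illustrative usage sketch (not part of the original file): pinning
 * marks the range dirty in pinned_extents and bumps the per-group and
 * per-space counters; unpinning reverses that and, for cached groups,
 * hands the bytes back to the free space cache:
 */
#if 0
	mutex_lock(&fs_info->pinned_mutex);
	btrfs_update_pinned_extents(root, bytenr, num_bytes, 1); /* pin */
	mutex_unlock(&fs_info->pinned_mutex);
	/* later, once the transaction that freed the range commits: */
	mutex_lock(&fs_info->pinned_mutex);
	btrfs_update_pinned_extents(root, bytenr, num_bytes, 0); /* unpin */
	mutex_unlock(&fs_info->pinned_mutex);
#endif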
2142 static int update_reserved_extents(struct btrfs_root *root,
2143 u64 bytenr, u64 num, int reserve)
2145 u64 len;
2146 struct btrfs_block_group_cache *cache;
2147 struct btrfs_fs_info *fs_info = root->fs_info;
2149 while (num > 0) {
2150 cache = btrfs_lookup_block_group(fs_info, bytenr);
2151 BUG_ON(!cache);
2152 len = min(num, cache->key.offset -
2153 (bytenr - cache->key.objectid));
2155 spin_lock(&cache->space_info->lock);
2156 spin_lock(&cache->lock);
2157 if (reserve) {
2158 cache->reserved += len;
2159 cache->space_info->bytes_reserved += len;
2160 } else {
2161 cache->reserved -= len;
2162 cache->space_info->bytes_reserved -= len;
2164 spin_unlock(&cache->lock);
2165 spin_unlock(&cache->space_info->lock);
2166 put_block_group(cache);
2167 bytenr += len;
2168 num -= len;
2170 return 0;
2173 int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
2175 u64 last = 0;
2176 u64 start;
2177 u64 end;
2178 struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
2179 int ret;
2181 mutex_lock(&root->fs_info->pinned_mutex);
2182 while (1) {
2183 ret = find_first_extent_bit(pinned_extents, last,
2184 &start, &end, EXTENT_DIRTY);
2185 if (ret)
2186 break;
2187 set_extent_dirty(copy, start, end, GFP_NOFS);
2188 last = end + 1;
2190 mutex_unlock(&root->fs_info->pinned_mutex);
2191 return 0;
2194 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
2195 struct btrfs_root *root,
2196 struct extent_io_tree *unpin)
2198 u64 start;
2199 u64 end;
2200 int ret;
2202 mutex_lock(&root->fs_info->pinned_mutex);
2203 while (1) {
2204 ret = find_first_extent_bit(unpin, 0, &start, &end,
2205 EXTENT_DIRTY);
2206 if (ret)
2207 break;
2209 ret = btrfs_discard_extent(root, start, end + 1 - start);
2211 btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
2212 clear_extent_dirty(unpin, start, end, GFP_NOFS);
2214 if (need_resched()) {
2215 mutex_unlock(&root->fs_info->pinned_mutex);
2216 cond_resched();
2217 mutex_lock(&root->fs_info->pinned_mutex);
2220 mutex_unlock(&root->fs_info->pinned_mutex);
2221 return ret;
2224 static int finish_current_insert(struct btrfs_trans_handle *trans,
2225 struct btrfs_root *extent_root, int all)
2227 u64 start;
2228 u64 end;
2229 u64 priv;
2230 u64 search = 0;
2231 struct btrfs_fs_info *info = extent_root->fs_info;
2232 struct btrfs_path *path;
2233 struct pending_extent_op *extent_op, *tmp;
2234 struct list_head insert_list, update_list;
2235 int ret;
2236 int num_inserts = 0, max_inserts, restart = 0;
2238 path = btrfs_alloc_path();
2239 INIT_LIST_HEAD(&insert_list);
2240 INIT_LIST_HEAD(&update_list);
2242 max_inserts = extent_root->leafsize /
2243 (2 * sizeof(struct btrfs_key) + 2 * sizeof(struct btrfs_item) +
2244 sizeof(struct btrfs_extent_ref) +
2245 sizeof(struct btrfs_extent_item));
2246 again:
2247 mutex_lock(&info->extent_ins_mutex);
2248 while (1) {
2249 ret = find_first_extent_bit(&info->extent_ins, search, &start,
2250 &end, EXTENT_WRITEBACK);
2251 if (ret) {
2252 if (restart && !num_inserts &&
2253 list_empty(&update_list)) {
2254 restart = 0;
2255 search = 0;
2256 continue;
2258 break;
2261 ret = try_lock_extent(&info->extent_ins, start, end, GFP_NOFS);
2262 if (!ret) {
2263 if (all)
2264 restart = 1;
2265 search = end + 1;
2266 if (need_resched()) {
2267 mutex_unlock(&info->extent_ins_mutex);
2268 cond_resched();
2269 mutex_lock(&info->extent_ins_mutex);
2271 continue;
2274 ret = get_state_private(&info->extent_ins, start, &priv);
2275 BUG_ON(ret);
2276 extent_op = (struct pending_extent_op *)(unsigned long) priv;
2278 if (extent_op->type == PENDING_EXTENT_INSERT) {
2279 num_inserts++;
2280 list_add_tail(&extent_op->list, &insert_list);
2281 search = end + 1;
2282 if (num_inserts == max_inserts) {
2283 restart = 1;
2284 break;
2286 } else if (extent_op->type == PENDING_BACKREF_UPDATE) {
2287 list_add_tail(&extent_op->list, &update_list);
2288 search = end + 1;
2289 } else {
2290 BUG();
2295 * process the update list, clear the writeback bit for it, and if
2296 * somebody marked this thing for deletion then just unlock it and be
2297 * done, the free_extents will handle it
2299 list_for_each_entry_safe(extent_op, tmp, &update_list, list) {
2300 clear_extent_bits(&info->extent_ins, extent_op->bytenr,
2301 extent_op->bytenr + extent_op->num_bytes - 1,
2302 EXTENT_WRITEBACK, GFP_NOFS);
2303 if (extent_op->del) {
2304 list_del_init(&extent_op->list);
2305 unlock_extent(&info->extent_ins, extent_op->bytenr,
2306 extent_op->bytenr + extent_op->num_bytes
2307 - 1, GFP_NOFS);
2308 kfree(extent_op);
2311 mutex_unlock(&info->extent_ins_mutex);
2314 * still have things left on the update list, go ahead and update
2315 * everything
2317 if (!list_empty(&update_list)) {
2318 ret = update_backrefs(trans, extent_root, path, &update_list);
2319 BUG_ON(ret);
2321 /* we may have COW'ed new blocks, so let's start over */
2322 if (all)
2323 restart = 1;
2327 * if no inserts need to be done, but we skipped some extents and we
2328 * need to make sure everything is cleaned, then reset everything and
2329 * go back to the beginning
2331 if (!num_inserts && restart) {
2332 search = 0;
2333 restart = 0;
2334 INIT_LIST_HEAD(&update_list);
2335 INIT_LIST_HEAD(&insert_list);
2336 goto again;
2337 } else if (!num_inserts) {
2338 goto out;
2342 * process the insert extents list. Again, if we are deleting this
2343 * extent, then just unlock it, pin down the bytes if need be, and be
2344 * done with it. Saves us from having to actually insert the extent
2345 * into the tree and then subsequently come along and delete it
2347 mutex_lock(&info->extent_ins_mutex);
2348 list_for_each_entry_safe(extent_op, tmp, &insert_list, list) {
2349 clear_extent_bits(&info->extent_ins, extent_op->bytenr,
2350 extent_op->bytenr + extent_op->num_bytes - 1,
2351 EXTENT_WRITEBACK, GFP_NOFS);
2352 if (extent_op->del) {
2353 u64 used;
2354 list_del_init(&extent_op->list);
2355 unlock_extent(&info->extent_ins, extent_op->bytenr,
2356 extent_op->bytenr + extent_op->num_bytes
2357 - 1, GFP_NOFS);
2359 mutex_lock(&extent_root->fs_info->pinned_mutex);
2360 ret = pin_down_bytes(trans, extent_root,
2361 extent_op->bytenr,
2362 extent_op->num_bytes, 0);
2363 mutex_unlock(&extent_root->fs_info->pinned_mutex);
2365 spin_lock(&info->delalloc_lock);
2366 used = btrfs_super_bytes_used(&info->super_copy);
2367 btrfs_set_super_bytes_used(&info->super_copy,
2368 used - extent_op->num_bytes);
2369 used = btrfs_root_used(&extent_root->root_item);
2370 btrfs_set_root_used(&extent_root->root_item,
2371 used - extent_op->num_bytes);
2372 spin_unlock(&info->delalloc_lock);
2374 ret = update_block_group(trans, extent_root,
2375 extent_op->bytenr,
2376 extent_op->num_bytes,
2377 0, ret > 0);
2378 BUG_ON(ret);
2379 kfree(extent_op);
2380 num_inserts--;
2383 mutex_unlock(&info->extent_ins_mutex);
2385 ret = insert_extents(trans, extent_root, path, &insert_list,
2386 num_inserts);
2387 BUG_ON(ret);
2390 * if restart is set for whatever reason we need to go back and start
2391 * searching through the pending list again.
2393 * We just inserted some extents, which could have resulted in new
2394 * blocks being allocated, which would result in new blocks needing
2395 * updates, so if all is set we _must_ restart to get the updated
2396 * blocks.
2398 if (restart || all) {
2399 INIT_LIST_HEAD(&insert_list);
2400 INIT_LIST_HEAD(&update_list);
2401 search = 0;
2402 restart = 0;
2403 num_inserts = 0;
2404 goto again;
2406 out:
2407 btrfs_free_path(path);
2408 return 0;
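/*
 * illustrative sketch (not part of the original file): a pending insert
 * as queued by the allocator, with hypothetical field values; the loop
 * above drains these in batches of at most max_inserts so each
 * insert_extents() call fits its new items into one leaf:
 */
#if 0
	struct pending_extent_op op = {
		.type      = PENDING_EXTENT_INSERT,
		.bytenr    = ins->objectid,
		.num_bytes = ins->offset,
		.del       = 0,
	};
#endif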
2411 static int pin_down_bytes(struct btrfs_trans_handle *trans,
2412 struct btrfs_root *root,
2413 u64 bytenr, u64 num_bytes, int is_data)
2415 int err = 0;
2416 struct extent_buffer *buf;
2418 if (is_data)
2419 goto pinit;
2421 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
2422 if (!buf)
2423 goto pinit;
2425 /* we can reuse a block if it hasn't been written
2426 * and it is from this transaction. We can't
2427 * reuse anything from the tree log root because
2428 * it has tiny sub-transactions.
2430 if (btrfs_buffer_uptodate(buf, 0) &&
2431 btrfs_try_tree_lock(buf)) {
2432 u64 header_owner = btrfs_header_owner(buf);
2433 u64 header_transid = btrfs_header_generation(buf);
2434 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
2435 header_owner != BTRFS_TREE_RELOC_OBJECTID &&
2436 header_transid == trans->transid &&
2437 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
2438 clean_tree_block(NULL, root, buf);
2439 btrfs_tree_unlock(buf);
2440 free_extent_buffer(buf);
2441 return 1;
2443 btrfs_tree_unlock(buf);
2445 free_extent_buffer(buf);
2446 pinit:
2447 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
2449 BUG_ON(err < 0);
2450 return 0;
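/*
 * illustrative sketch (not part of the original file): the return value
 * tells the caller whether the block was reclaimed outright (1, never
 * written in this transaction) or pinned until commit (0); callers feed
 * it into update_block_group() as mark_free:
 */
#if 0
	ret = pin_down_bytes(trans, root, bytenr, num_bytes, 0);
	ret = update_block_group(trans, root, bytenr, num_bytes,
				 0, ret > 0);
#endif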
2454 * remove an extent from the root, returns 0 on success
2456 static int __free_extent(struct btrfs_trans_handle *trans,
2457 struct btrfs_root *root,
2458 u64 bytenr, u64 num_bytes, u64 parent,
2459 u64 root_objectid, u64 ref_generation,
2460 u64 owner_objectid, int pin, int mark_free)
2462 struct btrfs_path *path;
2463 struct btrfs_key key;
2464 struct btrfs_fs_info *info = root->fs_info;
2465 struct btrfs_root *extent_root = info->extent_root;
2466 struct extent_buffer *leaf;
2467 int ret;
2468 int extent_slot = 0;
2469 int found_extent = 0;
2470 int num_to_del = 1;
2471 struct btrfs_extent_item *ei;
2472 u32 refs;
2474 key.objectid = bytenr;
2475 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
2476 key.offset = num_bytes;
2477 path = btrfs_alloc_path();
2478 if (!path)
2479 return -ENOMEM;
2481 path->reada = 1;
2482 ret = lookup_extent_backref(trans, extent_root, path,
2483 bytenr, parent, root_objectid,
2484 ref_generation, owner_objectid, 1);
2485 if (ret == 0) {
2486 struct btrfs_key found_key;
2487 extent_slot = path->slots[0];
2488 while (extent_slot > 0) {
2489 extent_slot--;
2490 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2491 extent_slot);
2492 if (found_key.objectid != bytenr)
2493 break;
2494 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
2495 found_key.offset == num_bytes) {
2496 found_extent = 1;
2497 break;
2499 if (path->slots[0] - extent_slot > 5)
2500 break;
2502 if (!found_extent) {
2503 ret = remove_extent_backref(trans, extent_root, path);
2504 BUG_ON(ret);
2505 btrfs_release_path(extent_root, path);
2506 ret = btrfs_search_slot(trans, extent_root,
2507 &key, path, -1, 1);
2508 if (ret) {
2509 printk(KERN_ERR "umm, got %d back from search"
2510 ", was looking for %llu\n", ret,
2511 (unsigned long long)bytenr);
2512 btrfs_print_leaf(extent_root, path->nodes[0]);
2514 BUG_ON(ret);
2515 extent_slot = path->slots[0];
2517 } else {
2518 btrfs_print_leaf(extent_root, path->nodes[0]);
2519 WARN_ON(1);
2520 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
2521 "root %llu gen %llu owner %llu\n",
2522 (unsigned long long)bytenr,
2523 (unsigned long long)root_objectid,
2524 (unsigned long long)ref_generation,
2525 (unsigned long long)owner_objectid);
2528 leaf = path->nodes[0];
2529 ei = btrfs_item_ptr(leaf, extent_slot,
2530 struct btrfs_extent_item);
2531 refs = btrfs_extent_refs(leaf, ei);
2532 BUG_ON(refs == 0);
2533 refs -= 1;
2534 btrfs_set_extent_refs(leaf, ei, refs);
2536 btrfs_mark_buffer_dirty(leaf);
2538 if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
2539 struct btrfs_extent_ref *ref;
2540 ref = btrfs_item_ptr(leaf, path->slots[0],
2541 struct btrfs_extent_ref);
2542 BUG_ON(btrfs_ref_num_refs(leaf, ref) != 1);
2543 /* if the back ref and the extent are next to each other
2544 * they get deleted below in one shot
2546 path->slots[0] = extent_slot;
2547 num_to_del = 2;
2548 } else if (found_extent) {
2549 /* otherwise delete the extent back ref */
2550 ret = remove_extent_backref(trans, extent_root, path);
2551 BUG_ON(ret);
2552 /* if refs are 0, we need to setup the path for deletion */
2553 if (refs == 0) {
2554 btrfs_release_path(extent_root, path);
2555 ret = btrfs_search_slot(trans, extent_root, &key, path,
2556 -1, 1);
2557 BUG_ON(ret);
2561 if (refs == 0) {
2562 u64 super_used;
2563 u64 root_used;
2565 if (pin) {
2566 mutex_lock(&root->fs_info->pinned_mutex);
2567 ret = pin_down_bytes(trans, root, bytenr, num_bytes,
2568 owner_objectid >= BTRFS_FIRST_FREE_OBJECTID);
2569 mutex_unlock(&root->fs_info->pinned_mutex);
2570 if (ret > 0)
2571 mark_free = 1;
2572 BUG_ON(ret < 0);
2574 /* block accounting for super block */
2575 spin_lock(&info->delalloc_lock);
2576 super_used = btrfs_super_bytes_used(&info->super_copy);
2577 btrfs_set_super_bytes_used(&info->super_copy,
2578 super_used - num_bytes);
2580 /* block accounting for root item */
2581 root_used = btrfs_root_used(&root->root_item);
2582 btrfs_set_root_used(&root->root_item,
2583 root_used - num_bytes);
2584 spin_unlock(&info->delalloc_lock);
2585 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
2586 num_to_del);
2587 BUG_ON(ret);
2588 btrfs_release_path(extent_root, path);
2590 if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
2591 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
2592 BUG_ON(ret);
2595 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
2596 mark_free);
2597 BUG_ON(ret);
2599 btrfs_free_path(path);
2600 finish_current_insert(trans, extent_root, 0);
2601 return ret;
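/*
 * illustrative sketch (not part of the original file): the adjacency
 * fast path above relies on the leaf holding
 *   slot n     (bytenr, BTRFS_EXTENT_ITEM_KEY, num_bytes)
 *   slot n + 1 (bytenr, BTRFS_EXTENT_REF_KEY,  parent)
 * so pointing the path at slot n with num_to_del == 2 deletes both in
 * one btrfs_del_items() call instead of a second search:
 */
#if 0
	path->slots[0] = extent_slot;
	num_to_del = 2;
#endif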
2605 * find all the blocks marked as pending in the pending_del tree and
2606 * remove them from the extent allocation tree
2608 static int del_pending_extents(struct btrfs_trans_handle *trans,
2609 struct btrfs_root *extent_root, int all)
2611 int ret;
2612 int err = 0;
2613 u64 start;
2614 u64 end;
2615 u64 priv;
2616 u64 search = 0;
2617 int nr = 0, skipped = 0;
2618 struct extent_io_tree *pending_del;
2619 struct extent_io_tree *extent_ins;
2620 struct pending_extent_op *extent_op;
2621 struct btrfs_fs_info *info = extent_root->fs_info;
2622 struct list_head delete_list;
2624 INIT_LIST_HEAD(&delete_list);
2625 extent_ins = &extent_root->fs_info->extent_ins;
2626 pending_del = &extent_root->fs_info->pending_del;
2628 again:
2629 mutex_lock(&info->extent_ins_mutex);
2630 while (1) {
2631 ret = find_first_extent_bit(pending_del, search, &start, &end,
2632 EXTENT_WRITEBACK);
2633 if (ret) {
2634 if (all && skipped && !nr) {
2635 search = 0;
2636 skipped = 0;
2637 continue;
2639 mutex_unlock(&info->extent_ins_mutex);
2640 break;
2643 ret = try_lock_extent(extent_ins, start, end, GFP_NOFS);
2644 if (!ret) {
2645 search = end + 1;
2646 skipped = 1;
2648 if (need_resched()) {
2649 mutex_unlock(&info->extent_ins_mutex);
2650 cond_resched();
2651 mutex_lock(&info->extent_ins_mutex);
2654 continue;
2656 BUG_ON(ret < 0);
2658 ret = get_state_private(pending_del, start, &priv);
2659 BUG_ON(ret);
2660 extent_op = (struct pending_extent_op *)(unsigned long)priv;
2662 clear_extent_bits(pending_del, start, end, EXTENT_WRITEBACK,
2663 GFP_NOFS);
2664 if (!test_range_bit(extent_ins, start, end,
2665 EXTENT_WRITEBACK, 0)) {
2666 list_add_tail(&extent_op->list, &delete_list);
2667 nr++;
2668 } else {
2669 kfree(extent_op);
2671 ret = get_state_private(&info->extent_ins, start,
2672 &priv);
2673 BUG_ON(ret);
2674 extent_op = (struct pending_extent_op *)
2675 (unsigned long)priv;
2677 clear_extent_bits(&info->extent_ins, start, end,
2678 EXTENT_WRITEBACK, GFP_NOFS);
2680 if (extent_op->type == PENDING_BACKREF_UPDATE) {
2681 list_add_tail(&extent_op->list, &delete_list);
2682 search = end + 1;
2683 nr++;
2684 continue;
2687 mutex_lock(&extent_root->fs_info->pinned_mutex);
2688 ret = pin_down_bytes(trans, extent_root, start,
2689 end + 1 - start, 0);
2690 mutex_unlock(&extent_root->fs_info->pinned_mutex);
2692 ret = update_block_group(trans, extent_root, start,
2693 end + 1 - start, 0, ret > 0);
2695 unlock_extent(extent_ins, start, end, GFP_NOFS);
2696 BUG_ON(ret);
2697 kfree(extent_op);
2699 if (ret)
2700 err = ret;
2702 search = end + 1;
2704 if (need_resched()) {
2705 mutex_unlock(&info->extent_ins_mutex);
2706 cond_resched();
2707 mutex_lock(&info->extent_ins_mutex);
2711 if (nr) {
2712 ret = free_extents(trans, extent_root, &delete_list);
2713 BUG_ON(ret);
2716 if (all && skipped) {
2717 INIT_LIST_HEAD(&delete_list);
2718 search = 0;
2719 nr = 0;
2720 goto again;
2723 if (!err)
2724 finish_current_insert(trans, extent_root, 0);
2725 return err;
2729 * remove an extent from the root, returns 0 on success
2731 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2732 struct btrfs_root *root,
2733 u64 bytenr, u64 num_bytes, u64 parent,
2734 u64 root_objectid, u64 ref_generation,
2735 u64 owner_objectid, int pin)
2737 struct btrfs_root *extent_root = root->fs_info->extent_root;
2738 int pending_ret;
2739 int ret;
2741 WARN_ON(num_bytes < root->sectorsize);
2742 if (root == extent_root) {
2743 struct pending_extent_op *extent_op = NULL;
2745 mutex_lock(&root->fs_info->extent_ins_mutex);
2746 if (test_range_bit(&root->fs_info->extent_ins, bytenr,
2747 bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
2748 u64 priv;
2749 ret = get_state_private(&root->fs_info->extent_ins,
2750 bytenr, &priv);
2751 BUG_ON(ret);
2752 extent_op = (struct pending_extent_op *)
2753 (unsigned long)priv;
2755 extent_op->del = 1;
2756 if (extent_op->type == PENDING_EXTENT_INSERT) {
2757 mutex_unlock(&root->fs_info->extent_ins_mutex);
2758 return 0;
2762 if (extent_op) {
2763 ref_generation = extent_op->orig_generation;
2764 parent = extent_op->orig_parent;
2767 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2768 BUG_ON(!extent_op);
2770 extent_op->type = PENDING_EXTENT_DELETE;
2771 extent_op->bytenr = bytenr;
2772 extent_op->num_bytes = num_bytes;
2773 extent_op->parent = parent;
2774 extent_op->orig_parent = parent;
2775 extent_op->generation = ref_generation;
2776 extent_op->orig_generation = ref_generation;
2777 extent_op->level = (int)owner_objectid;
2778 INIT_LIST_HEAD(&extent_op->list);
2779 extent_op->del = 0;
2781 set_extent_bits(&root->fs_info->pending_del,
2782 bytenr, bytenr + num_bytes - 1,
2783 EXTENT_WRITEBACK, GFP_NOFS);
2784 set_state_private(&root->fs_info->pending_del,
2785 bytenr, (unsigned long)extent_op);
2786 mutex_unlock(&root->fs_info->extent_ins_mutex);
2787 return 0;
2789 /* if metadata always pin */
2790 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
2791 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
2792 mutex_lock(&root->fs_info->pinned_mutex);
2793 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
2794 mutex_unlock(&root->fs_info->pinned_mutex);
2795 update_reserved_extents(root, bytenr, num_bytes, 0);
2796 return 0;
2798 pin = 1;
2801 /* if data pin when any transaction has committed this */
2802 if (ref_generation != trans->transid)
2803 pin = 1;
2805 ret = __free_extent(trans, root, bytenr, num_bytes, parent,
2806 root_objectid, ref_generation,
2807 owner_objectid, pin, pin == 0);
2809 finish_current_insert(trans, root->fs_info->extent_root, 0);
2810 pending_ret = del_pending_extents(trans, root->fs_info->extent_root, 0);
2811 return ret ? ret : pending_ret;
2814 int btrfs_free_extent(struct btrfs_trans_handle *trans,
2815 struct btrfs_root *root,
2816 u64 bytenr, u64 num_bytes, u64 parent,
2817 u64 root_objectid, u64 ref_generation,
2818 u64 owner_objectid, int pin)
2820 int ret;
2822 ret = __btrfs_free_extent(trans, root, bytenr, num_bytes, parent,
2823 root_objectid, ref_generation,
2824 owner_objectid, pin);
2825 return ret;
2828 static u64 stripe_align(struct btrfs_root *root, u64 val)
2830 u64 mask = ((u64)root->stripesize - 1);
2831 u64 ret = (val + mask) & ~mask;
2832 return ret;
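/*
 * worked example (not part of the original file): with a power-of-two
 * stripesize of 64K the mask is 0xffff and values round up to the next
 * stripe boundary:
 */
#if 0
	/* stripe_align(root, 0x12345) == (0x12345 + 0xffff) & ~0xffff
	 *                             == 0x20000
	 */
#endif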
2836 * walks the btree of allocated extents and finds a hole of a given size.
2837 * The key ins is changed to record the hole:
2838 * ins->objectid == block start
2839 * ins->flags == BTRFS_EXTENT_ITEM_KEY
2840 * ins->offset == number of blocks
2841 * Any available blocks before search_start are skipped.
2843 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
2844 struct btrfs_root *orig_root,
2845 u64 num_bytes, u64 empty_size,
2846 u64 search_start, u64 search_end,
2847 u64 hint_byte, struct btrfs_key *ins,
2848 u64 exclude_start, u64 exclude_nr,
2849 int data)
2851 int ret = 0;
2852 struct btrfs_root *root = orig_root->fs_info->extent_root;
2853 u64 total_needed = num_bytes;
2854 u64 *last_ptr = NULL;
2855 u64 last_wanted = 0;
2856 struct btrfs_block_group_cache *block_group = NULL;
2857 int chunk_alloc_done = 0;
2858 int empty_cluster = 2 * 1024 * 1024;
2859 int allowed_chunk_alloc = 0;
2860 struct list_head *head = NULL, *cur = NULL;
2861 int loop = 0;
2862 int extra_loop = 0;
2863 struct btrfs_space_info *space_info;
2865 WARN_ON(num_bytes < root->sectorsize);
2866 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
2867 ins->objectid = 0;
2868 ins->offset = 0;
2870 if (orig_root->ref_cows || empty_size)
2871 allowed_chunk_alloc = 1;
2873 if (data & BTRFS_BLOCK_GROUP_METADATA) {
2874 last_ptr = &root->fs_info->last_alloc;
2875 empty_cluster = 64 * 1024;
2878 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
2879 last_ptr = &root->fs_info->last_data_alloc;
2881 if (last_ptr) {
2882 if (*last_ptr) {
2883 hint_byte = *last_ptr;
2884 last_wanted = *last_ptr;
2885 } else
2886 empty_size += empty_cluster;
2887 } else {
2888 empty_cluster = 0;
2890 search_start = max(search_start, first_logical_byte(root, 0));
2891 search_start = max(search_start, hint_byte);
2893 if (last_wanted && search_start != last_wanted) {
2894 last_wanted = 0;
2895 empty_size += empty_cluster;
2898 total_needed += empty_size;
2899 block_group = btrfs_lookup_block_group(root->fs_info, search_start);
2900 if (!block_group)
2901 block_group = btrfs_lookup_first_block_group(root->fs_info,
2902 search_start);
2903 space_info = __find_space_info(root->fs_info, data);
2905 down_read(&space_info->groups_sem);
2906 while (1) {
2907 struct btrfs_free_space *free_space;
2909 * the only way this happens is if our hint points to a block
2910 * group that's not of the proper type. While looping this
2911 * should never happen
2913 if (empty_size)
2914 extra_loop = 1;
2916 if (!block_group)
2917 goto new_group_no_lock;
2919 if (unlikely(!block_group->cached)) {
2920 mutex_lock(&block_group->cache_mutex);
2921 ret = cache_block_group(root, block_group);
2922 mutex_unlock(&block_group->cache_mutex);
2923 if (ret)
2924 break;
2927 mutex_lock(&block_group->alloc_mutex);
2928 if (unlikely(!block_group_bits(block_group, data)))
2929 goto new_group;
2931 if (unlikely(block_group->ro))
2932 goto new_group;
2934 free_space = btrfs_find_free_space(block_group, search_start,
2935 total_needed);
2936 if (free_space) {
2937 u64 start = block_group->key.objectid;
2938 u64 end = block_group->key.objectid +
2939 block_group->key.offset;
2941 search_start = stripe_align(root, free_space->offset);
2943 /* move on to the next group */
2944 if (search_start + num_bytes >= search_end)
2945 goto new_group;
2947 /* move on to the next group */
2948 if (search_start + num_bytes > end)
2949 goto new_group;
2951 if (last_wanted && search_start != last_wanted) {
2952 total_needed += empty_cluster;
2953 empty_size += empty_cluster;
2954 last_wanted = 0;
2956 * if search_start is still in this block group
2957 * then we just re-search this block group
2959 if (search_start >= start &&
2960 search_start < end) {
2961 mutex_unlock(&block_group->alloc_mutex);
2962 continue;
2965 /* else we go to the next block group */
2966 goto new_group;
2969 if (exclude_nr > 0 &&
2970 (search_start + num_bytes > exclude_start &&
2971 search_start < exclude_start + exclude_nr)) {
2972 search_start = exclude_start + exclude_nr;
2974 * if search_start is still in this block group
2975 * then we just re-search this block group
2977 if (search_start >= start &&
2978 search_start < end) {
2979 mutex_unlock(&block_group->alloc_mutex);
2980 last_wanted = 0;
2981 continue;
2984 /* else we go to the next block group */
2985 goto new_group;
2988 ins->objectid = search_start;
2989 ins->offset = num_bytes;
2991 btrfs_remove_free_space_lock(block_group, search_start,
2992 num_bytes);
2993 /* we are all good, let's return */
2994 mutex_unlock(&block_group->alloc_mutex);
2995 break;
2997 new_group:
2998 mutex_unlock(&block_group->alloc_mutex);
2999 put_block_group(block_group);
3000 block_group = NULL;
3001 new_group_no_lock:
3002 /* don't try to compare new allocations against the
3003 * last allocation any more
3005 last_wanted = 0;
3008 * Here's how this works.
3009 * loop == 0: we were searching a block group via a hint
3010 * and didn't find anything, so we start at
3011 * the head of the block groups and keep searching
3012 * loop == 1: we're searching through all of the block groups
3013 * if we hit the head again we have searched
3014 * all of the block groups for this space and we
3015 * need to try and allocate; if we can't, error out.
3016 * loop == 2: we allocated more space and are looping through
3017 * all of the block groups again.
3019 if (loop == 0) {
3020 head = &space_info->block_groups;
3021 cur = head->next;
3022 loop++;
3023 } else if (loop == 1 && cur == head) {
3024 int keep_going;
3026 /* at this point we give up on the empty_size
3027 * allocations and just try to allocate the min
3028 * space.
3030 * The extra_loop field was set if an empty_size
3031 * allocation was attempted above, and if this
3032 * is set we need to try the loop again without
3033 * the additional empty_size.
3035 total_needed -= empty_size;
3036 empty_size = 0;
3037 keep_going = extra_loop;
3038 loop++;
3040 if (allowed_chunk_alloc && !chunk_alloc_done) {
3041 up_read(&space_info->groups_sem);
3042 ret = do_chunk_alloc(trans, root, num_bytes +
3043 2 * 1024 * 1024, data, 1);
3044 down_read(&space_info->groups_sem);
3045 if (ret < 0)
3046 goto loop_check;
3047 head = &space_info->block_groups;
3049 * we've allocated a new chunk, keep
3050 * trying
3052 keep_going = 1;
3053 chunk_alloc_done = 1;
3054 } else if (!allowed_chunk_alloc) {
3055 space_info->force_alloc = 1;
3057 loop_check:
3058 if (keep_going) {
3059 cur = head->next;
3060 extra_loop = 0;
3061 } else {
3062 break;
3064 } else if (cur == head) {
3065 break;
3068 block_group = list_entry(cur, struct btrfs_block_group_cache,
3069 list);
3070 atomic_inc(&block_group->count);
3072 search_start = block_group->key.objectid;
3073 cur = cur->next;
3076 /* we found what we needed */
3077 if (ins->objectid) {
3078 if (!(data & BTRFS_BLOCK_GROUP_DATA))
3079 trans->block_group = block_group->key.objectid;
3081 if (last_ptr)
3082 *last_ptr = ins->objectid + ins->offset;
3083 ret = 0;
3084 } else if (!ret) {
3085 printk(KERN_ERR "btrfs searching for %llu bytes, "
3086 "num_bytes %llu, loop %d, allowed_alloc %d\n",
3087 (unsigned long long)total_needed,
3088 (unsigned long long)num_bytes,
3089 loop, allowed_chunk_alloc);
3090 ret = -ENOSPC;
3092 if (block_group)
3093 put_block_group(block_group);
3095 up_read(&space_info->groups_sem);
3096 return ret;
3099 static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
3101 struct btrfs_block_group_cache *cache;
3103 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
3104 (unsigned long long)(info->total_bytes - info->bytes_used -
3105 info->bytes_pinned - info->bytes_reserved),
3106 (info->full) ? "" : "not ");
3108 down_read(&info->groups_sem);
3109 list_for_each_entry(cache, &info->block_groups, list) {
3110 spin_lock(&cache->lock);
3111 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
3112 "%llu pinned %llu reserved\n",
3113 (unsigned long long)cache->key.objectid,
3114 (unsigned long long)cache->key.offset,
3115 (unsigned long long)btrfs_block_group_used(&cache->item),
3116 (unsigned long long)cache->pinned,
3117 (unsigned long long)cache->reserved);
3118 btrfs_dump_free_space(cache, bytes);
3119 spin_unlock(&cache->lock);
3121 up_read(&info->groups_sem);
3124 static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3125 struct btrfs_root *root,
3126 u64 num_bytes, u64 min_alloc_size,
3127 u64 empty_size, u64 hint_byte,
3128 u64 search_end, struct btrfs_key *ins,
3129 u64 data)
3131 int ret;
3132 u64 search_start = 0;
3133 u64 alloc_profile;
3134 struct btrfs_fs_info *info = root->fs_info;
3136 if (data) {
3137 alloc_profile = info->avail_data_alloc_bits &
3138 info->data_alloc_profile;
3139 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
3140 } else if (root == root->fs_info->chunk_root) {
3141 alloc_profile = info->avail_system_alloc_bits &
3142 info->system_alloc_profile;
3143 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
3144 } else {
3145 alloc_profile = info->avail_metadata_alloc_bits &
3146 info->metadata_alloc_profile;
3147 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
3149 again:
3150 data = btrfs_reduce_alloc_profile(root, data);
3152 * the only place that sets empty_size is btrfs_realloc_node, which
3153 * is not called recursively on allocations
3155 if (empty_size || root->ref_cows) {
3156 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
3157 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3158 2 * 1024 * 1024,
3159 BTRFS_BLOCK_GROUP_METADATA |
3160 (info->metadata_alloc_profile &
3161 info->avail_metadata_alloc_bits), 0);
3163 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3164 num_bytes + 2 * 1024 * 1024, data, 0);
3167 WARN_ON(num_bytes < root->sectorsize);
3168 ret = find_free_extent(trans, root, num_bytes, empty_size,
3169 search_start, search_end, hint_byte, ins,
3170 trans->alloc_exclude_start,
3171 trans->alloc_exclude_nr, data);
3173 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
3174 num_bytes = num_bytes >> 1;
3175 num_bytes = num_bytes & ~(root->sectorsize - 1);
3176 num_bytes = max(num_bytes, min_alloc_size);
3177 do_chunk_alloc(trans, root->fs_info->extent_root,
3178 num_bytes, data, 1);
3179 goto again;
3181 if (ret) {
3182 struct btrfs_space_info *sinfo;
3184 sinfo = __find_space_info(root->fs_info, data);
3185 printk(KERN_ERR "btrfs allocation failed flags %llu, "
3186 "wanted %llu\n", (unsigned long long)data,
3187 (unsigned long long)num_bytes);
3188 dump_space_info(sinfo, num_bytes);
3189 BUG();
3192 return ret;
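/*
 * illustrative sketch (not part of the original file): the -ENOSPC
 * retry above halves the request (sector-aligned) down to
 * min_alloc_size, forcing a chunk allocation before each retry, so big
 * data allocations degrade instead of failing outright:
 */
#if 0
	/* asking for 1MB with a 64K floor on a fragmented fs retries
	 * 512K, 256K, 128K, 64K before giving up and dumping the
	 * space_info
	 */
#endif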
3195 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
3197 struct btrfs_block_group_cache *cache;
3198 int ret = 0;
3200 cache = btrfs_lookup_block_group(root->fs_info, start);
3201 if (!cache) {
3202 printk(KERN_ERR "Unable to find block group for %llu\n",
3203 (unsigned long long)start);
3204 return -ENOSPC;
3207 ret = btrfs_discard_extent(root, start, len);
3209 btrfs_add_free_space(cache, start, len);
3210 put_block_group(cache);
3211 update_reserved_extents(root, start, len, 0);
3213 return ret;
3216 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3217 struct btrfs_root *root,
3218 u64 num_bytes, u64 min_alloc_size,
3219 u64 empty_size, u64 hint_byte,
3220 u64 search_end, struct btrfs_key *ins,
3221 u64 data)
3223 int ret;
3224 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
3225 empty_size, hint_byte, search_end, ins,
3226 data);
3227 update_reserved_extents(root, ins->objectid, ins->offset, 1);
3228 return ret;
3231 static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
3232 struct btrfs_root *root, u64 parent,
3233 u64 root_objectid, u64 ref_generation,
3234 u64 owner, struct btrfs_key *ins)
3236 int ret;
3237 int pending_ret;
3238 u64 super_used;
3239 u64 root_used;
3240 u64 num_bytes = ins->offset;
3241 u32 sizes[2];
3242 struct btrfs_fs_info *info = root->fs_info;
3243 struct btrfs_root *extent_root = info->extent_root;
3244 struct btrfs_extent_item *extent_item;
3245 struct btrfs_extent_ref *ref;
3246 struct btrfs_path *path;
3247 struct btrfs_key keys[2];
3249 if (parent == 0)
3250 parent = ins->objectid;
3252 /* block accounting for super block */
3253 spin_lock(&info->delalloc_lock);
3254 super_used = btrfs_super_bytes_used(&info->super_copy);
3255 btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
3257 /* block accounting for root item */
3258 root_used = btrfs_root_used(&root->root_item);
3259 btrfs_set_root_used(&root->root_item, root_used + num_bytes);
3260 spin_unlock(&info->delalloc_lock);
3262 if (root == extent_root) {
3263 struct pending_extent_op *extent_op;
3265 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
3266 BUG_ON(!extent_op);
3268 extent_op->type = PENDING_EXTENT_INSERT;
3269 extent_op->bytenr = ins->objectid;
3270 extent_op->num_bytes = ins->offset;
3271 extent_op->parent = parent;
3272 extent_op->orig_parent = 0;
3273 extent_op->generation = ref_generation;
3274 extent_op->orig_generation = 0;
3275 extent_op->level = (int)owner;
3276 INIT_LIST_HEAD(&extent_op->list);
3277 extent_op->del = 0;
3279 mutex_lock(&root->fs_info->extent_ins_mutex);
3280 set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
3281 ins->objectid + ins->offset - 1,
3282 EXTENT_WRITEBACK, GFP_NOFS);
3283 set_state_private(&root->fs_info->extent_ins,
3284 ins->objectid, (unsigned long)extent_op);
3285 mutex_unlock(&root->fs_info->extent_ins_mutex);
3286 goto update_block;
3289 memcpy(&keys[0], ins, sizeof(*ins));
3290 keys[1].objectid = ins->objectid;
3291 keys[1].type = BTRFS_EXTENT_REF_KEY;
3292 keys[1].offset = parent;
3293 sizes[0] = sizeof(*extent_item);
3294 sizes[1] = sizeof(*ref);
3296 path = btrfs_alloc_path();
3297 BUG_ON(!path);
3299 ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
3300 sizes, 2);
3301 BUG_ON(ret);
3303 extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3304 struct btrfs_extent_item);
3305 btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
3306 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3307 struct btrfs_extent_ref);
3309 btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
3310 btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
3311 btrfs_set_ref_objectid(path->nodes[0], ref, owner);
3312 btrfs_set_ref_num_refs(path->nodes[0], ref, 1);
3314 btrfs_mark_buffer_dirty(path->nodes[0]);
3316 trans->alloc_exclude_start = 0;
3317 trans->alloc_exclude_nr = 0;
3318 btrfs_free_path(path);
3319 finish_current_insert(trans, extent_root, 0);
3320 pending_ret = del_pending_extents(trans, extent_root, 0);
3322 if (ret)
3323 goto out;
3324 if (pending_ret) {
3325 ret = pending_ret;
3326 goto out;
3329 update_block:
3330 ret = update_block_group(trans, root, ins->objectid,
3331 ins->offset, 1, 0);
3332 if (ret) {
3333 printk(KERN_ERR "btrfs update block group failed for %llu "
3334 "%llu\n", (unsigned long long)ins->objectid,
3335 (unsigned long long)ins->offset);
3336 BUG();
3338 out:
3339 return ret;
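/*
 * illustrative sketch (not part of the original file): in the
 * non-extent-root case the extent item and its first backref are
 * created back to back by one btrfs_insert_empty_items() call;
 * hypothetical key values for a 16K extent at bytenr 0x500000:
 */
#if 0
	/* keys[0] = (0x500000, BTRFS_EXTENT_ITEM_KEY, 0x4000)
	 * keys[1] = (0x500000, BTRFS_EXTENT_REF_KEY,  parent)
	 */
#endif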
3342 int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
3343 struct btrfs_root *root, u64 parent,
3344 u64 root_objectid, u64 ref_generation,
3345 u64 owner, struct btrfs_key *ins)
3347 int ret;
3349 if (root_objectid == BTRFS_TREE_LOG_OBJECTID)
3350 return 0;
3351 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
3352 ref_generation, owner, ins);
3353 update_reserved_extents(root, ins->objectid, ins->offset, 0);
3354 return ret;
3358 * this is used by the tree logging recovery code. It records that
3359 * an extent has been allocated and makes sure to clear the free
3360 * space cache bits as well
3362 int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
3363 struct btrfs_root *root, u64 parent,
3364 u64 root_objectid, u64 ref_generation,
3365 u64 owner, struct btrfs_key *ins)
3367 int ret;
3368 struct btrfs_block_group_cache *block_group;
3370 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
3371 mutex_lock(&block_group->cache_mutex);
3372 cache_block_group(root, block_group);
3373 mutex_unlock(&block_group->cache_mutex);
3375 ret = btrfs_remove_free_space(block_group, ins->objectid,
3376 ins->offset);
3377 BUG_ON(ret);
3378 put_block_group(block_group);
3379 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
3380 ref_generation, owner, ins);
3381 return ret;
3385 * finds a free extent and does all the dirty work required for allocation.
3386 * returns the key for the extent through ins.
3389 * returns 0 if everything worked, non-zero otherwise.
3391 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
3392 struct btrfs_root *root,
3393 u64 num_bytes, u64 parent, u64 min_alloc_size,
3394 u64 root_objectid, u64 ref_generation,
3395 u64 owner_objectid, u64 empty_size, u64 hint_byte,
3396 u64 search_end, struct btrfs_key *ins, u64 data)
3398 int ret;
3400 ret = __btrfs_reserve_extent(trans, root, num_bytes,
3401 min_alloc_size, empty_size, hint_byte,
3402 search_end, ins, data);
3403 BUG_ON(ret);
3404 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
3405 ret = __btrfs_alloc_reserved_extent(trans, root, parent,
3406 root_objectid, ref_generation,
3407 owner_objectid, ins);
3408 BUG_ON(ret);
3410 } else {
3411 update_reserved_extents(root, ins->objectid, ins->offset, 1);
3413 return ret;
3416 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
3417 struct btrfs_root *root,
3418 u64 bytenr, u32 blocksize)
3420 struct extent_buffer *buf;
3422 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
3423 if (!buf)
3424 return ERR_PTR(-ENOMEM);
3425 btrfs_set_header_generation(buf, trans->transid);
3426 btrfs_tree_lock(buf);
3427 clean_tree_block(trans, root, buf);
3429 btrfs_set_lock_blocking(buf);
3430 btrfs_set_buffer_uptodate(buf);
3432 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
3433 set_extent_dirty(&root->dirty_log_pages, buf->start,
3434 buf->start + buf->len - 1, GFP_NOFS);
3435 } else {
3436 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
3437 buf->start + buf->len - 1, GFP_NOFS);
3439 trans->blocks_used++;
3440 /* this returns a buffer locked for blocking */
3441 return buf;
3445 * helper function to allocate a block for a given tree
3446 * returns the tree buffer or NULL.
3448 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
3449 struct btrfs_root *root,
3450 u32 blocksize, u64 parent,
3451 u64 root_objectid,
3452 u64 ref_generation,
3453 int level,
3454 u64 hint,
3455 u64 empty_size)
3457 struct btrfs_key ins;
3458 int ret;
3459 struct extent_buffer *buf;
3461 ret = btrfs_alloc_extent(trans, root, blocksize, parent, blocksize,
3462 root_objectid, ref_generation, level,
3463 empty_size, hint, (u64)-1, &ins, 0);
3464 if (ret) {
3465 BUG_ON(ret > 0);
3466 return ERR_PTR(ret);
3469 buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
3470 return buf;
3473 int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
3474 struct btrfs_root *root, struct extent_buffer *leaf)
3476 u64 leaf_owner;
3477 u64 leaf_generation;
3478 struct refsort *sorted;
3479 struct btrfs_key key;
3480 struct btrfs_file_extent_item *fi;
3481 int i;
3482 int nritems;
3483 int ret;
3484 int refi = 0;
3485 int slot;
3487 BUG_ON(!btrfs_is_leaf(leaf));
3488 nritems = btrfs_header_nritems(leaf);
3489 leaf_owner = btrfs_header_owner(leaf);
3490 leaf_generation = btrfs_header_generation(leaf);
3492 sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
3493 /* we do this loop twice. The first time we build a list
3494 * of the extents we have a reference on, then we sort the list
3495 * by bytenr. The second time around we actually do the
3496 * extent freeing.
3498 for (i = 0; i < nritems; i++) {
3499 u64 disk_bytenr;
3500 cond_resched();
3502 btrfs_item_key_to_cpu(leaf, &key, i);
3504 /* only extents have references, skip everything else */
3505 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3506 continue;
3508 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3510 /* inline extents live in the btree, they don't have refs */
3511 if (btrfs_file_extent_type(leaf, fi) ==
3512 BTRFS_FILE_EXTENT_INLINE)
3513 continue;
3515 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
3517 /* holes don't have refs */
3518 if (disk_bytenr == 0)
3519 continue;
3521 sorted[refi].bytenr = disk_bytenr;
3522 sorted[refi].slot = i;
3523 refi++;
3526 if (refi == 0)
3527 goto out;
3529 sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
3531 for (i = 0; i < refi; i++) {
3532 u64 disk_bytenr;
3534 disk_bytenr = sorted[i].bytenr;
3535 slot = sorted[i].slot;
3537 cond_resched();
3539 btrfs_item_key_to_cpu(leaf, &key, slot);
3540 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3541 continue;
3543 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
3545 ret = __btrfs_free_extent(trans, root, disk_bytenr,
3546 btrfs_file_extent_disk_num_bytes(leaf, fi),
3547 leaf->start, leaf_owner, leaf_generation,
3548 key.objectid, 0);
3549 BUG_ON(ret);
3551 atomic_inc(&root->fs_info->throttle_gen);
3552 wake_up(&root->fs_info->transaction_throttle);
3553 cond_resched();
3555 out:
3556 kfree(sorted);
3557 return 0;
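/*
 * illustrative sketch (not part of the original file): the refsort
 * pattern used here and in the helpers below -- collect (bytenr, slot)
 * pairs, sort by bytenr, then free in disk order so the extent
 * allocation tree is walked mostly sequentially:
 */
#if 0
	sorted[refi].bytenr = disk_bytenr;
	sorted[refi].slot = i;
	refi++;
	/* after the collection loop: */
	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
#endif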
3560 static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
3561 struct btrfs_root *root,
3562 struct btrfs_leaf_ref *ref)
3564 int i;
3565 int ret;
3566 struct btrfs_extent_info *info;
3567 struct refsort *sorted;
3569 if (ref->nritems == 0)
3570 return 0;
3572 sorted = kmalloc(sizeof(*sorted) * ref->nritems, GFP_NOFS);
3573 for (i = 0; i < ref->nritems; i++) {
3574 sorted[i].bytenr = ref->extents[i].bytenr;
3575 sorted[i].slot = i;
3577 sort(sorted, ref->nritems, sizeof(struct refsort), refsort_cmp, NULL);
3580 * the items in the ref were sorted when the ref was inserted
3581 * into the ref cache, so this is already in order
3583 for (i = 0; i < ref->nritems; i++) {
3584 info = ref->extents + sorted[i].slot;
3585 ret = __btrfs_free_extent(trans, root, info->bytenr,
3586 info->num_bytes, ref->bytenr,
3587 ref->owner, ref->generation,
3588 info->objectid, 0);
3590 atomic_inc(&root->fs_info->throttle_gen);
3591 wake_up(&root->fs_info->transaction_throttle);
3592 cond_resched();
3594 BUG_ON(ret);
3595 info++;
3598 kfree(sorted);
3599 return 0;
3602 static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start,
3603 u64 len, u32 *refs)
3605 int ret;
3607 ret = btrfs_lookup_extent_ref(NULL, root, start, len, refs);
3608 BUG_ON(ret);
3610 #if 0 /* some debugging code in case we see problems here */
3611 /* if the refs count is one, it won't get increased again. But
3612 * if the ref count is > 1, someone may be decreasing it at
3613 * the same time we are.
3615 if (*refs != 1) {
3616 struct extent_buffer *eb = NULL;
3617 eb = btrfs_find_create_tree_block(root, start, len);
3618 if (eb)
3619 btrfs_tree_lock(eb);
3621 mutex_lock(&root->fs_info->alloc_mutex);
3622 ret = lookup_extent_ref(NULL, root, start, len, refs);
3623 BUG_ON(ret);
3624 mutex_unlock(&root->fs_info->alloc_mutex);
3626 if (eb) {
3627 btrfs_tree_unlock(eb);
3628 free_extent_buffer(eb);
3630 if (*refs == 1) {
3631 printk(KERN_ERR "btrfs block %llu went down to one "
3632 "during drop_snap\n", (unsigned long long)start);
3636 #endif
3638 cond_resched();
3639 return ret;
3643 * this is used while deleting old snapshots, and it drops the refs
3644 * on a whole subtree starting from a level 1 node.
3646 * The idea is to sort all the leaf pointers, and then drop the
3647 * ref on all the leaves in order. Most of the time the leaves
3648 * will have ref cache entries, so no leaf IOs will be required to
3649 * find the extents they have references on.
3651 * For each leaf, any references it has are also dropped in order
3653 * This ends up dropping the references in something close to optimal
3654 * order for reading and modifying the extent allocation tree.
3656 static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
3657 struct btrfs_root *root,
3658 struct btrfs_path *path)
3660 u64 bytenr;
3661 u64 root_owner;
3662 u64 root_gen;
3663 struct extent_buffer *eb = path->nodes[1];
3664 struct extent_buffer *leaf;
3665 struct btrfs_leaf_ref *ref;
3666 struct refsort *sorted = NULL;
3667 int nritems = btrfs_header_nritems(eb);
3668 int ret;
3669 int i;
3670 int refi = 0;
3671 int slot = path->slots[1];
3672 u32 blocksize = btrfs_level_size(root, 0);
3673 u32 refs;
3675 if (nritems == 0)
3676 goto out;
3678 root_owner = btrfs_header_owner(eb);
3679 root_gen = btrfs_header_generation(eb);
3680 sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
3683 * step one, sort all the leaf pointers so we don't scribble
3684 * randomly into the extent allocation tree
3686 for (i = slot; i < nritems; i++) {
3687 sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
3688 sorted[refi].slot = i;
3689 refi++;
3693 * nritems won't be zero, but if we're picking up drop_snapshot
3694 * after a crash, slot might be > 0, so double check things
3695 * just in case.
3697 if (refi == 0)
3698 goto out;
3700 sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
3703 * the first loop frees everything the leaves point to
3705 for (i = 0; i < refi; i++) {
3706 u64 ptr_gen;
3708 bytenr = sorted[i].bytenr;
3711 * check the reference count on this leaf. If it is > 1
3712 * we just decrement it below and don't update any
3713 * of the refs the leaf points to.
3715 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
3716 BUG_ON(ret);
3717 if (refs != 1)
3718 continue;
3720 ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);
3723 * the leaf only had one reference, which means the
3724 * only thing pointing to this leaf is the snapshot
3725 * we're deleting. It isn't possible for the reference
3726 * count to increase again later
3728 * The reference cache is checked for the leaf,
3729 * and if found we'll be able to drop any refs held by
3730 * the leaf without needing to read it in.
3732 ref = btrfs_lookup_leaf_ref(root, bytenr);
3733 if (ref && ref->generation != ptr_gen) {
3734 btrfs_free_leaf_ref(root, ref);
3735 ref = NULL;
3737 if (ref) {
3738 ret = cache_drop_leaf_ref(trans, root, ref);
3739 BUG_ON(ret);
3740 btrfs_remove_leaf_ref(root, ref);
3741 btrfs_free_leaf_ref(root, ref);
3742 } else {
3744 * the leaf wasn't in the reference cache, so
3745 * we have to read it.
3747 leaf = read_tree_block(root, bytenr, blocksize,
3748 ptr_gen);
3749 ret = btrfs_drop_leaf_ref(trans, root, leaf);
3750 BUG_ON(ret);
3751 free_extent_buffer(leaf);
3753 atomic_inc(&root->fs_info->throttle_gen);
3754 wake_up(&root->fs_info->transaction_throttle);
3755 cond_resched();
3759 * run through the loop again to free the refs on the leaves.
3760 * This is faster than doing it in the loop above because
3761 * the leaves are likely to be clustered together. We end up
3762 * working in nice chunks on the extent allocation tree.
3764 for (i = 0; i < refi; i++) {
3765 bytenr = sorted[i].bytenr;
3766 ret = __btrfs_free_extent(trans, root, bytenr,
3767 blocksize, eb->start,
3768 root_owner, root_gen, 0, 1);
3769 BUG_ON(ret);
3771 atomic_inc(&root->fs_info->throttle_gen);
3772 wake_up(&root->fs_info->transaction_throttle);
3773 cond_resched();
3775 out:
3776 kfree(sorted);
3779 * update the path to show we've processed the entire level 1
3780 * node. This will get saved into the root's drop_progress
3781 * field so these drops are not repeated again if this transaction
3782 * commits.
3784 path->slots[1] = nritems;
3785 return 0;
3789 * helper function for drop_snapshot, this walks down the tree dropping ref
3790 * counts as it goes.
3792 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
3793 struct btrfs_root *root,
3794 struct btrfs_path *path, int *level)
3796 u64 root_owner;
3797 u64 root_gen;
3798 u64 bytenr;
3799 u64 ptr_gen;
3800 struct extent_buffer *next;
3801 struct extent_buffer *cur;
3802 struct extent_buffer *parent;
3803 u32 blocksize;
3804 int ret;
3805 u32 refs;
3807 WARN_ON(*level < 0);
3808 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3809 ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
3810 path->nodes[*level]->len, &refs);
3811 BUG_ON(ret);
3812 if (refs > 1)
3813 goto out;
3816 * walk down to the last node level and free all the leaves
3818 while (*level >= 0) {
3819 WARN_ON(*level < 0);
3820 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3821 cur = path->nodes[*level];
3823 if (btrfs_header_level(cur) != *level)
3824 WARN_ON(1);
3826 if (path->slots[*level] >=
3827 btrfs_header_nritems(cur))
3828 break;
3830 /* the new code goes down to level 1 and does all the
3831 * leaves pointed to by that node in bulk. So, this check
3832 * for level 0 will always be false.
3834 * But, the disk format allows the drop_snapshot_progress
3835 * field in the root to leave things in a state where
3836 * a leaf will need cleaning up here. If someone crashes
3837 * with the old code and then boots with the new code,
3838 * we might find a leaf here.
3840 if (*level == 0) {
3841 ret = btrfs_drop_leaf_ref(trans, root, cur);
3842 BUG_ON(ret);
3843 break;
3847 * once we get to level one, process the whole node
3848 * at once, including everything below it.
3850 if (*level == 1) {
3851 ret = drop_level_one_refs(trans, root, path);
3852 BUG_ON(ret);
3853 break;
3856 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3857 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3858 blocksize = btrfs_level_size(root, *level - 1);
3860 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
3861 BUG_ON(ret);
3864 * if there is more than one reference, we don't need
3865 * to read that node to drop any references it has. We
3866 * just drop the ref we hold on that node and move on to the
3867 * next slot in this level.
3869 if (refs != 1) {
3870 parent = path->nodes[*level];
3871 root_owner = btrfs_header_owner(parent);
3872 root_gen = btrfs_header_generation(parent);
3873 path->slots[*level]++;
3875 ret = __btrfs_free_extent(trans, root, bytenr,
3876 blocksize, parent->start,
3877 root_owner, root_gen,
3878 *level - 1, 1);
3879 BUG_ON(ret);
3881 atomic_inc(&root->fs_info->throttle_gen);
3882 wake_up(&root->fs_info->transaction_throttle);
3883 cond_resched();
3885 continue;
3889 * we need to keep freeing things in the next level down.
3890 * read the block and loop around to process it
3892 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
3893 WARN_ON(*level <= 0);
3894 if (path->nodes[*level-1])
3895 free_extent_buffer(path->nodes[*level-1]);
3896 path->nodes[*level-1] = next;
3897 *level = btrfs_header_level(next);
3898 path->slots[*level] = 0;
3899 cond_resched();
3901 out:
3902 WARN_ON(*level < 0);
3903 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3905 if (path->nodes[*level] == root->node) {
3906 parent = path->nodes[*level];
3907 bytenr = path->nodes[*level]->start;
3908 } else {
3909 parent = path->nodes[*level + 1];
3910 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
3913 blocksize = btrfs_level_size(root, *level);
3914 root_owner = btrfs_header_owner(parent);
3915 root_gen = btrfs_header_generation(parent);
3918 * cleanup and free the reference on the last node
3919 * we processed
3921 ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
3922 parent->start, root_owner, root_gen,
3923 *level, 1);
3924 free_extent_buffer(path->nodes[*level]);
3925 path->nodes[*level] = NULL;
3927 *level += 1;
3928 BUG_ON(ret);
3930 cond_resched();
3931 return 0;
3935 * helper function for drop_subtree, this function is similar to
3936 * walk_down_tree. The main difference is that it checks reference
3937 * counts while tree blocks are locked.
3939 static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
3940 struct btrfs_root *root,
3941 struct btrfs_path *path, int *level)
3943 struct extent_buffer *next;
3944 struct extent_buffer *cur;
3945 struct extent_buffer *parent;
3946 u64 bytenr;
3947 u64 ptr_gen;
3948 u32 blocksize;
3949 u32 refs;
3950 int ret;
3952 cur = path->nodes[*level];
3953 ret = btrfs_lookup_extent_ref(trans, root, cur->start, cur->len,
3954 &refs);
3955 BUG_ON(ret);
3956 if (refs > 1)
3957 goto out;
3959 while (*level >= 0) {
3960 cur = path->nodes[*level];
3961 if (*level == 0) {
3962 ret = btrfs_drop_leaf_ref(trans, root, cur);
3963 BUG_ON(ret);
3964 clean_tree_block(trans, root, cur);
3965 break;
3967 if (path->slots[*level] >= btrfs_header_nritems(cur)) {
3968 clean_tree_block(trans, root, cur);
3969 break;
3972 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3973 blocksize = btrfs_level_size(root, *level - 1);
3974 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3976 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
3977 btrfs_tree_lock(next);
3978 btrfs_set_lock_blocking(next);
3980 ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
3981 &refs);
3982 BUG_ON(ret);
3983 if (refs > 1) {
3984 parent = path->nodes[*level];
3985 ret = btrfs_free_extent(trans, root, bytenr,
3986 blocksize, parent->start,
3987 btrfs_header_owner(parent),
3988 btrfs_header_generation(parent),
3989 *level - 1, 1);
3990 BUG_ON(ret);
3991 path->slots[*level]++;
3992 btrfs_tree_unlock(next);
3993 free_extent_buffer(next);
3994 continue;
3997 *level = btrfs_header_level(next);
3998 path->nodes[*level] = next;
3999 path->slots[*level] = 0;
4000 path->locks[*level] = 1;
4001 cond_resched();
4003 out:
4004 parent = path->nodes[*level + 1];
4005 bytenr = path->nodes[*level]->start;
4006 blocksize = path->nodes[*level]->len;
4008 ret = btrfs_free_extent(trans, root, bytenr, blocksize,
4009 parent->start, btrfs_header_owner(parent),
4010 btrfs_header_generation(parent), *level, 1);
4011 BUG_ON(ret);
4013 if (path->locks[*level]) {
4014 btrfs_tree_unlock(path->nodes[*level]);
4015 path->locks[*level] = 0;
4017 free_extent_buffer(path->nodes[*level]);
4018 path->nodes[*level] = NULL;
4019 *level += 1;
4020 cond_resched();
4021 return 0;
4025 * helper for dropping snapshots. This walks back up the tree in the path
4026 * to find the first node higher up where we haven't yet gone through
4027 * all the slots
4029 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
4030 struct btrfs_root *root,
4031 struct btrfs_path *path,
4032 int *level, int max_level)
4034 u64 root_owner;
4035 u64 root_gen;
4036 struct btrfs_root_item *root_item = &root->root_item;
4037 int i;
4038 int slot;
4039 int ret;
4041 for (i = *level; i < max_level && path->nodes[i]; i++) {
4042 slot = path->slots[i];
4043 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
4044 struct extent_buffer *node;
4045 struct btrfs_disk_key disk_key;
4048 * there is more work to do in this level.
4049 * Update the drop_progress marker to reflect
4050 * the work we've done so far, and then bump
4051 * the slot number
4053 node = path->nodes[i];
4054 path->slots[i]++;
4055 *level = i;
4056 WARN_ON(*level == 0);
4057 btrfs_node_key(node, &disk_key, path->slots[i]);
4058 memcpy(&root_item->drop_progress,
4059 &disk_key, sizeof(disk_key));
4060 root_item->drop_level = i;
4061 return 0;
4062 } else {
4063 struct extent_buffer *parent;
4066 * this whole node is done, free our reference
4067 * on it and go up one level
4069 if (path->nodes[*level] == root->node)
4070 parent = path->nodes[*level];
4071 else
4072 parent = path->nodes[*level + 1];
4074 root_owner = btrfs_header_owner(parent);
4075 root_gen = btrfs_header_generation(parent);
4077 clean_tree_block(trans, root, path->nodes[*level]);
4078 ret = btrfs_free_extent(trans, root,
4079 path->nodes[*level]->start,
4080 path->nodes[*level]->len,
4081 parent->start, root_owner,
4082 root_gen, *level, 1);
4083 BUG_ON(ret);
4084 if (path->locks[*level]) {
4085 btrfs_tree_unlock(path->nodes[*level]);
4086 path->locks[*level] = 0;
4088 free_extent_buffer(path->nodes[*level]);
4089 path->nodes[*level] = NULL;
4090 *level = i + 1;
4093 return 1;
4097 * drop the reference count on the tree rooted at 'root'. This traverses
4098 * the tree freeing any blocks that have a ref count of zero after being
4099 * decremented.
4101 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
4102 *root)
4104 int ret = 0;
4105 int wret;
4106 int level;
4107 struct btrfs_path *path;
4108 int i;
4109 int orig_level;
4110 struct btrfs_root_item *root_item = &root->root_item;
4112 WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
4113 path = btrfs_alloc_path();
4114 BUG_ON(!path);
4116 level = btrfs_header_level(root->node);
4117 orig_level = level;
4118 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
4119 path->nodes[level] = root->node;
4120 extent_buffer_get(root->node);
4121 path->slots[level] = 0;
4122 } else {
4123 struct btrfs_key key;
4124 struct btrfs_disk_key found_key;
4125 struct extent_buffer *node;
4127 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
4128 level = root_item->drop_level;
4129 path->lowest_level = level;
4130 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4131 if (wret < 0) {
4132 ret = wret;
4133 goto out;
4135 node = path->nodes[level];
4136 btrfs_node_key(node, &found_key, path->slots[level]);
4137 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
4138 sizeof(found_key)));
4140 * unlock our path; this is safe because only this
4141 * function is allowed to delete this snapshot
4143 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4144 if (path->nodes[i] && path->locks[i]) {
4145 path->locks[i] = 0;
4146 btrfs_tree_unlock(path->nodes[i]);
4150 while (1) {
4151 wret = walk_down_tree(trans, root, path, &level);
4152 if (wret > 0)
4153 break;
4154 if (wret < 0)
4155 ret = wret;
4157 wret = walk_up_tree(trans, root, path, &level,
4158 BTRFS_MAX_LEVEL);
4159 if (wret > 0)
4160 break;
4161 if (wret < 0)
4162 ret = wret;
4163 if (trans->transaction->in_commit) {
4164 ret = -EAGAIN;
4165 break;
4167 atomic_inc(&root->fs_info->throttle_gen);
4168 wake_up(&root->fs_info->transaction_throttle);
4170 for (i = 0; i <= orig_level; i++) {
4171 if (path->nodes[i]) {
4172 free_extent_buffer(path->nodes[i]);
4173 path->nodes[i] = NULL;
4176 out:
4177 btrfs_free_path(path);
4178 return ret;
4181 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
4182 struct btrfs_root *root,
4183 struct extent_buffer *node,
4184 struct extent_buffer *parent)
4186 struct btrfs_path *path;
4187 int level;
4188 int parent_level;
4189 int ret = 0;
4190 int wret;
4192 path = btrfs_alloc_path();
4193 BUG_ON(!path);
4195 BUG_ON(!btrfs_tree_locked(parent));
4196 parent_level = btrfs_header_level(parent);
4197 extent_buffer_get(parent);
4198 path->nodes[parent_level] = parent;
4199 path->slots[parent_level] = btrfs_header_nritems(parent);
4201 BUG_ON(!btrfs_tree_locked(node));
4202 level = btrfs_header_level(node);
4203 extent_buffer_get(node);
4204 path->nodes[level] = node;
4205 path->slots[level] = 0;
4207 while (1) {
4208 wret = walk_down_subtree(trans, root, path, &level);
4209 if (wret < 0)
4210 ret = wret;
4211 if (wret != 0)
4212 break;
4214 wret = walk_up_tree(trans, root, path, &level, parent_level);
4215 if (wret < 0)
4216 ret = wret;
4217 if (wret != 0)
4218 break;
4221 btrfs_free_path(path);
4222 return ret;
4225 static unsigned long calc_ra(unsigned long start, unsigned long last,
4226 unsigned long nr)
4228 return min(last, start + nr - 1);
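/*
 * Pull every page backing the byte range [start, start + len) of the
 * relocation inode into the page cache and redirty it as delalloc, so
 * ordinary writeback rewrites the data at its new location.  The first
 * page is also tagged EXTENT_BOUNDARY so the relocated range does not
 * get merged with neighbouring extents.
 */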
4231 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
4232 u64 len)
4234 u64 page_start;
4235 u64 page_end;
4236 unsigned long first_index;
4237 unsigned long last_index;
4238 unsigned long i;
4239 struct page *page;
4240 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4241 struct file_ra_state *ra;
4242 struct btrfs_ordered_extent *ordered;
4243 unsigned int total_read = 0;
4244 unsigned int total_dirty = 0;
4245 int ret = 0;
4247 ra = kzalloc(sizeof(*ra), GFP_NOFS);
if (!ra)
return -ENOMEM;
4249 mutex_lock(&inode->i_mutex);
4250 first_index = start >> PAGE_CACHE_SHIFT;
4251 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
4253 /* make sure the dirty trick played by the caller works */
4254 ret = invalidate_inode_pages2_range(inode->i_mapping,
4255 first_index, last_index);
4256 if (ret)
4257 goto out_unlock;
4259 file_ra_state_init(ra, inode->i_mapping);
4261 for (i = first_index ; i <= last_index; i++) {
4262 if (total_read % ra->ra_pages == 0) {
4263 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
4264 calc_ra(i, last_index, ra->ra_pages));
4266 total_read++;
4267 again:
4268 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
4269 BUG_ON(1);
4270 page = grab_cache_page(inode->i_mapping, i);
4271 if (!page) {
4272 ret = -ENOMEM;
4273 goto out_unlock;
4275 if (!PageUptodate(page)) {
4276 btrfs_readpage(NULL, page);
4277 lock_page(page);
4278 if (!PageUptodate(page)) {
4279 unlock_page(page);
4280 page_cache_release(page);
4281 ret = -EIO;
4282 goto out_unlock;
4285 wait_on_page_writeback(page);
4287 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
4288 page_end = page_start + PAGE_CACHE_SIZE - 1;
4289 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
4291 ordered = btrfs_lookup_ordered_extent(inode, page_start);
4292 if (ordered) {
4293 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4294 unlock_page(page);
4295 page_cache_release(page);
4296 btrfs_start_ordered_extent(inode, ordered, 1);
4297 btrfs_put_ordered_extent(ordered);
4298 goto again;
4300 set_page_extent_mapped(page);
4302 if (i == first_index)
4303 set_extent_bits(io_tree, page_start, page_end,
4304 EXTENT_BOUNDARY, GFP_NOFS);
4305 btrfs_set_extent_delalloc(inode, page_start, page_end);
4307 set_page_dirty(page);
4308 total_dirty++;
4310 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4311 unlock_page(page);
4312 page_cache_release(page);
4315 out_unlock:
4316 kfree(ra);
4317 mutex_unlock(&inode->i_mutex);
4318 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
4319 return ret;
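/*
 * Wire the extent being relocated into the relocation inode's address
 * space: a pinned extent_map maps file offset (extent_key->objectid -
 * offset) to the old disk extent, so relocate_inode_pages() can read
 * the old data through btrfs_readpage and mark it dirty at its new
 * file position.
 */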
4322 static noinline int relocate_data_extent(struct inode *reloc_inode,
4323 struct btrfs_key *extent_key,
4324 u64 offset)
4326 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
4327 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
4328 struct extent_map *em;
4329 u64 start = extent_key->objectid - offset;
4330 u64 end = start + extent_key->offset - 1;
4332 em = alloc_extent_map(GFP_NOFS);
4333 BUG_ON(!em || IS_ERR(em));
4335 em->start = start;
4336 em->len = extent_key->offset;
4337 em->block_len = extent_key->offset;
4338 em->block_start = extent_key->objectid;
4339 em->bdev = root->fs_info->fs_devices->latest_bdev;
4340 set_bit(EXTENT_FLAG_PINNED, &em->flags);
4342 /* set up an extent map to cheat btrfs_readpage */
4343 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
4344 while (1) {
4345 int ret;
4346 spin_lock(&em_tree->lock);
4347 ret = add_extent_mapping(em_tree, em);
4348 spin_unlock(&em_tree->lock);
4349 if (ret != -EEXIST) {
4350 free_extent_map(em);
4351 break;
4353 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
4355 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
4357 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
4360 struct btrfs_ref_path {
4361 u64 extent_start;
4362 u64 nodes[BTRFS_MAX_LEVEL];
4363 u64 root_objectid;
4364 u64 root_generation;
4365 u64 owner_objectid;
4366 u32 num_refs;
4367 int lowest_level;
4368 int current_level;
4369 int shared_level;
4371 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
4372 u64 new_nodes[BTRFS_MAX_LEVEL];
4375 struct disk_extent {
4376 u64 ram_bytes;
4377 u64 disk_bytenr;
4378 u64 disk_num_bytes;
4379 u64 offset;
4380 u64 num_bytes;
4381 u8 compression;
4382 u8 encryption;
4383 u16 other_encoding;
4386 static int is_cowonly_root(u64 root_objectid)
4388 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
4389 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
4390 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
4391 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
4392 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
4393 root_objectid == BTRFS_CSUM_TREE_OBJECTID)
4394 return 1;
4395 return 0;
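/*
 * Advance a btrfs_ref_path one step through the backrefs of
 * ref_path->extent_start.  On the first call we walk up from the
 * extent until we hit a tree root (or a reference created in the
 * running transaction), recording each node's bytenr in
 * ref_path->nodes[].  Subsequent calls walk back down to the deepest
 * level that still has an unvisited backref and climb up again, so
 * repeated calls enumerate every path from the extent to a root:
 * 0 means a path was found, 1 means the walk is exhausted, and
 * negative values are errors.
 */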
4398 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
4399 struct btrfs_root *extent_root,
4400 struct btrfs_ref_path *ref_path,
4401 int first_time)
4403 struct extent_buffer *leaf;
4404 struct btrfs_path *path;
4405 struct btrfs_extent_ref *ref;
4406 struct btrfs_key key;
4407 struct btrfs_key found_key;
4408 u64 bytenr;
4409 u32 nritems;
4410 int level;
4411 int ret = 1;
4413 path = btrfs_alloc_path();
4414 if (!path)
4415 return -ENOMEM;
4417 if (first_time) {
4418 ref_path->lowest_level = -1;
4419 ref_path->current_level = -1;
4420 ref_path->shared_level = -1;
4421 goto walk_up;
4423 walk_down:
4424 level = ref_path->current_level - 1;
4425 while (level >= -1) {
4426 u64 parent;
4427 if (level < ref_path->lowest_level)
4428 break;
4430 if (level >= 0)
4431 bytenr = ref_path->nodes[level];
4432 else
4433 bytenr = ref_path->extent_start;
4434 BUG_ON(bytenr == 0);
4436 parent = ref_path->nodes[level + 1];
4437 ref_path->nodes[level + 1] = 0;
4438 ref_path->current_level = level;
4439 BUG_ON(parent == 0);
4441 key.objectid = bytenr;
4442 key.offset = parent + 1;
4443 key.type = BTRFS_EXTENT_REF_KEY;
4445 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
4446 if (ret < 0)
4447 goto out;
4448 BUG_ON(ret == 0);
4450 leaf = path->nodes[0];
4451 nritems = btrfs_header_nritems(leaf);
4452 if (path->slots[0] >= nritems) {
4453 ret = btrfs_next_leaf(extent_root, path);
4454 if (ret < 0)
4455 goto out;
4456 if (ret > 0)
4457 goto next;
4458 leaf = path->nodes[0];
4461 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4462 if (found_key.objectid == bytenr &&
4463 found_key.type == BTRFS_EXTENT_REF_KEY) {
4464 if (level < ref_path->shared_level)
4465 ref_path->shared_level = level;
4466 goto found;
4468 next:
4469 level--;
4470 btrfs_release_path(extent_root, path);
4471 cond_resched();
4473 /* reached lowest level */
4474 ret = 1;
4475 goto out;
4476 walk_up:
4477 level = ref_path->current_level;
4478 while (level < BTRFS_MAX_LEVEL - 1) {
4479 u64 ref_objectid;
4481 if (level >= 0)
4482 bytenr = ref_path->nodes[level];
4483 else
4484 bytenr = ref_path->extent_start;
4486 BUG_ON(bytenr == 0);
4488 key.objectid = bytenr;
4489 key.offset = 0;
4490 key.type = BTRFS_EXTENT_REF_KEY;
4492 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
4493 if (ret < 0)
4494 goto out;
4496 leaf = path->nodes[0];
4497 nritems = btrfs_header_nritems(leaf);
4498 if (path->slots[0] >= nritems) {
4499 ret = btrfs_next_leaf(extent_root, path);
4500 if (ret < 0)
4501 goto out;
4502 if (ret > 0) {
4503 /* the extent was freed by someone */
4504 if (ref_path->lowest_level == level)
4505 goto out;
4506 btrfs_release_path(extent_root, path);
4507 goto walk_down;
4509 leaf = path->nodes[0];
4512 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4513 if (found_key.objectid != bytenr ||
4514 found_key.type != BTRFS_EXTENT_REF_KEY) {
4515 /* the extent was freed by someone */
4516 if (ref_path->lowest_level == level) {
4517 ret = 1;
4518 goto out;
4520 btrfs_release_path(extent_root, path);
4521 goto walk_down;
4523 found:
4524 ref = btrfs_item_ptr(leaf, path->slots[0],
4525 struct btrfs_extent_ref);
4526 ref_objectid = btrfs_ref_objectid(leaf, ref);
4527 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4528 if (first_time) {
4529 level = (int)ref_objectid;
4530 BUG_ON(level >= BTRFS_MAX_LEVEL);
4531 ref_path->lowest_level = level;
4532 ref_path->current_level = level;
4533 ref_path->nodes[level] = bytenr;
4534 } else {
4535 WARN_ON(ref_objectid != level);
4537 } else {
4538 WARN_ON(level != -1);
4540 first_time = 0;
4542 if (ref_path->lowest_level == level) {
4543 ref_path->owner_objectid = ref_objectid;
4544 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
4548 * the block is a tree root or the block isn't in a
4549 * reference counted tree.
4551 if (found_key.objectid == found_key.offset ||
4552 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
4553 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
4554 ref_path->root_generation =
4555 btrfs_ref_generation(leaf, ref);
4556 if (level < 0) {
4557 /* special reference from the tree log */
4558 ref_path->nodes[0] = found_key.offset;
4559 ref_path->current_level = 0;
4561 ret = 0;
4562 goto out;
4565 level++;
4566 BUG_ON(ref_path->nodes[level] != 0);
4567 ref_path->nodes[level] = found_key.offset;
4568 ref_path->current_level = level;
4571 * the reference was created in the running transaction,
4572 * so there is no need to continue walking up.
4574 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
4575 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
4576 ref_path->root_generation =
4577 btrfs_ref_generation(leaf, ref);
4578 ret = 0;
4579 goto out;
4582 btrfs_release_path(extent_root, path);
4583 cond_resched();
4585 /* reached max tree level, but no tree root found. */
4586 BUG();
4587 out:
4588 btrfs_free_path(path);
4589 return ret;
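/*
 * The two wrappers below are meant to be called in a loop; a minimal
 * sketch, mirroring the loop in relocate_one_extent() further down:
 *
 *	ret = btrfs_first_ref_path(trans, extent_root, ref_path, bytenr);
 *	while (ret == 0) {
 *		(process one reference path)
 *		ret = btrfs_next_ref_path(trans, extent_root, ref_path);
 *	}
 */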
4592 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
4593 struct btrfs_root *extent_root,
4594 struct btrfs_ref_path *ref_path,
4595 u64 extent_start)
4597 memset(ref_path, 0, sizeof(*ref_path));
4598 ref_path->extent_start = extent_start;
4600 return __next_ref_path(trans, extent_root, ref_path, 1);
4603 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
4604 struct btrfs_root *extent_root,
4605 struct btrfs_ref_path *ref_path)
4607 return __next_ref_path(trans, extent_root, ref_path, 0);
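/*
 * Collect the file extents of the relocation inode that cover the
 * extent described by extent_key and return them as an array of
 * struct disk_extent.  With no_fragment set, only a single extent is
 * accepted and 1 is returned if the new data turns out fragmented;
 * otherwise the array is grown as needed.
 */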
4610 static noinline int get_new_locations(struct inode *reloc_inode,
4611 struct btrfs_key *extent_key,
4612 u64 offset, int no_fragment,
4613 struct disk_extent **extents,
4614 int *nr_extents)
4616 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
4617 struct btrfs_path *path;
4618 struct btrfs_file_extent_item *fi;
4619 struct extent_buffer *leaf;
4620 struct disk_extent *exts = *extents;
4621 struct btrfs_key found_key;
4622 u64 cur_pos;
4623 u64 last_byte;
4624 u32 nritems;
4625 int nr = 0;
4626 int max = *nr_extents;
4627 int ret;
4629 WARN_ON(!no_fragment && *extents);
4630 if (!exts) {
4631 max = 1;
4632 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
4633 if (!exts)
4634 return -ENOMEM;
4637 path = btrfs_alloc_path();
4638 BUG_ON(!path);
4640 cur_pos = extent_key->objectid - offset;
4641 last_byte = extent_key->objectid + extent_key->offset;
4642 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
4643 cur_pos, 0);
4644 if (ret < 0)
4645 goto out;
4646 if (ret > 0) {
4647 ret = -ENOENT;
4648 goto out;
4651 while (1) {
4652 leaf = path->nodes[0];
4653 nritems = btrfs_header_nritems(leaf);
4654 if (path->slots[0] >= nritems) {
4655 ret = btrfs_next_leaf(root, path);
4656 if (ret < 0)
4657 goto out;
4658 if (ret > 0)
4659 break;
4660 leaf = path->nodes[0];
4663 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4664 if (found_key.offset != cur_pos ||
4665 found_key.type != BTRFS_EXTENT_DATA_KEY ||
4666 found_key.objectid != reloc_inode->i_ino)
4667 break;
4669 fi = btrfs_item_ptr(leaf, path->slots[0],
4670 struct btrfs_file_extent_item);
4671 if (btrfs_file_extent_type(leaf, fi) !=
4672 BTRFS_FILE_EXTENT_REG ||
4673 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
4674 break;
4676 if (nr == max) {
4677 struct disk_extent *old = exts;
4678 max *= 2;
4679 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
if (!exts) {
exts = old;
ret = -ENOMEM;
goto out;
}
4680 memcpy(exts, old, sizeof(*exts) * nr);
4681 if (old != *extents)
4682 kfree(old);
4685 exts[nr].disk_bytenr =
4686 btrfs_file_extent_disk_bytenr(leaf, fi);
4687 exts[nr].disk_num_bytes =
4688 btrfs_file_extent_disk_num_bytes(leaf, fi);
4689 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
4690 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4691 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
4692 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
4693 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
4694 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
4695 fi);
4696 BUG_ON(exts[nr].offset > 0);
4697 BUG_ON(exts[nr].compression || exts[nr].encryption);
4698 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
4700 cur_pos += exts[nr].num_bytes;
4701 nr++;
4703 if (cur_pos + offset >= last_byte)
4704 break;
4706 if (no_fragment) {
4707 ret = 1;
4708 goto out;
4710 path->slots[0]++;
4713 BUG_ON(cur_pos + offset > last_byte);
4714 if (cur_pos + offset < last_byte) {
4715 ret = -ENOENT;
4716 goto out;
4718 ret = 0;
4719 out:
4720 btrfs_free_path(path);
4721 if (ret) {
4722 if (exts != *extents)
4723 kfree(exts);
4724 } else {
4725 *extents = exts;
4726 *nr_extents = nr;
4728 return ret;
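/*
 * Walk the file extent items in 'root' that still point at the extent
 * being relocated (scanning forward from leaf_key) and switch them to
 * the new location(s).  For the single-extent case the pointer is
 * updated in place, references are migrated with btrfs_inc_extent_ref()
 * and btrfs_free_extent(), and the affected file range is kept locked
 * so the update cannot race with ordered I/O.
 */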
4731 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
4732 struct btrfs_root *root,
4733 struct btrfs_path *path,
4734 struct btrfs_key *extent_key,
4735 struct btrfs_key *leaf_key,
4736 struct btrfs_ref_path *ref_path,
4737 struct disk_extent *new_extents,
4738 int nr_extents)
4740 struct extent_buffer *leaf;
4741 struct btrfs_file_extent_item *fi;
4742 struct inode *inode = NULL;
4743 struct btrfs_key key;
4744 u64 lock_start = 0;
4745 u64 lock_end = 0;
4746 u64 num_bytes;
4747 u64 ext_offset;
4748 u64 search_end = (u64)-1;
4749 u32 nritems;
4750 int nr_scanned = 0;
4751 int extent_locked = 0;
4752 int extent_type;
4753 int ret;
4755 memcpy(&key, leaf_key, sizeof(key));
4756 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
4757 if (key.objectid < ref_path->owner_objectid ||
4758 (key.objectid == ref_path->owner_objectid &&
4759 key.type < BTRFS_EXTENT_DATA_KEY)) {
4760 key.objectid = ref_path->owner_objectid;
4761 key.type = BTRFS_EXTENT_DATA_KEY;
4762 key.offset = 0;
4766 while (1) {
4767 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4768 if (ret < 0)
4769 goto out;
4771 leaf = path->nodes[0];
4772 nritems = btrfs_header_nritems(leaf);
4773 next:
4774 if (extent_locked && ret > 0) {
4776 * the file extent item was modified by someone
4777 * before the extent got locked.
4779 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4780 lock_end, GFP_NOFS);
4781 extent_locked = 0;
4784 if (path->slots[0] >= nritems) {
4785 if (++nr_scanned > 2)
4786 break;
4788 BUG_ON(extent_locked);
4789 ret = btrfs_next_leaf(root, path);
4790 if (ret < 0)
4791 goto out;
4792 if (ret > 0)
4793 break;
4794 leaf = path->nodes[0];
4795 nritems = btrfs_header_nritems(leaf);
4798 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4800 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
4801 if ((key.objectid > ref_path->owner_objectid) ||
4802 (key.objectid == ref_path->owner_objectid &&
4803 key.type > BTRFS_EXTENT_DATA_KEY) ||
4804 key.offset >= search_end)
4805 break;
4808 if (inode && key.objectid != inode->i_ino) {
4809 BUG_ON(extent_locked);
4810 btrfs_release_path(root, path);
4811 mutex_unlock(&inode->i_mutex);
4812 iput(inode);
4813 inode = NULL;
4814 continue;
4817 if (key.type != BTRFS_EXTENT_DATA_KEY) {
4818 path->slots[0]++;
4819 ret = 1;
4820 goto next;
4822 fi = btrfs_item_ptr(leaf, path->slots[0],
4823 struct btrfs_file_extent_item);
4824 extent_type = btrfs_file_extent_type(leaf, fi);
4825 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
4826 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
4827 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
4828 extent_key->objectid)) {
4829 path->slots[0]++;
4830 ret = 1;
4831 goto next;
4834 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4835 ext_offset = btrfs_file_extent_offset(leaf, fi);
4837 if (search_end == (u64)-1) {
4838 search_end = key.offset - ext_offset +
4839 btrfs_file_extent_ram_bytes(leaf, fi);
4842 if (!extent_locked) {
4843 lock_start = key.offset;
4844 lock_end = lock_start + num_bytes - 1;
4845 } else {
4846 if (lock_start > key.offset ||
4847 lock_end + 1 < key.offset + num_bytes) {
4848 unlock_extent(&BTRFS_I(inode)->io_tree,
4849 lock_start, lock_end, GFP_NOFS);
4850 extent_locked = 0;
4854 if (!inode) {
4855 btrfs_release_path(root, path);
4857 inode = btrfs_iget_locked(root->fs_info->sb,
4858 key.objectid, root);
4859 if (inode->i_state & I_NEW) {
4860 BTRFS_I(inode)->root = root;
4861 BTRFS_I(inode)->location.objectid =
4862 key.objectid;
4863 BTRFS_I(inode)->location.type =
4864 BTRFS_INODE_ITEM_KEY;
4865 BTRFS_I(inode)->location.offset = 0;
4866 btrfs_read_locked_inode(inode);
4867 unlock_new_inode(inode);
4870 * some code calls btrfs_commit_transaction while
4871 * holding the i_mutex, so we can't use mutex_lock
4872 * here.
4874 if (is_bad_inode(inode) ||
4875 !mutex_trylock(&inode->i_mutex)) {
4876 iput(inode);
4877 inode = NULL;
4878 key.offset = (u64)-1;
4879 goto skip;
4883 if (!extent_locked) {
4884 struct btrfs_ordered_extent *ordered;
4886 btrfs_release_path(root, path);
4888 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4889 lock_end, GFP_NOFS);
4890 ordered = btrfs_lookup_first_ordered_extent(inode,
4891 lock_end);
4892 if (ordered &&
4893 ordered->file_offset <= lock_end &&
4894 ordered->file_offset + ordered->len > lock_start) {
4895 unlock_extent(&BTRFS_I(inode)->io_tree,
4896 lock_start, lock_end, GFP_NOFS);
4897 btrfs_start_ordered_extent(inode, ordered, 1);
4898 btrfs_put_ordered_extent(ordered);
4899 key.offset += num_bytes;
4900 goto skip;
4902 if (ordered)
4903 btrfs_put_ordered_extent(ordered);
4905 extent_locked = 1;
4906 continue;
4909 if (nr_extents == 1) {
4910 /* update extent pointer in place */
4911 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4912 new_extents[0].disk_bytenr);
4913 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4914 new_extents[0].disk_num_bytes);
4915 btrfs_mark_buffer_dirty(leaf);
4917 btrfs_drop_extent_cache(inode, key.offset,
4918 key.offset + num_bytes - 1, 0);
4920 ret = btrfs_inc_extent_ref(trans, root,
4921 new_extents[0].disk_bytenr,
4922 new_extents[0].disk_num_bytes,
4923 leaf->start,
4924 root->root_key.objectid,
4925 trans->transid,
4926 key.objectid);
4927 BUG_ON(ret);
4929 ret = btrfs_free_extent(trans, root,
4930 extent_key->objectid,
4931 extent_key->offset,
4932 leaf->start,
4933 btrfs_header_owner(leaf),
4934 btrfs_header_generation(leaf),
4935 key.objectid, 0);
4936 BUG_ON(ret);
4938 btrfs_release_path(root, path);
4939 key.offset += num_bytes;
4940 } else {
4941 BUG_ON(1);
4942 #if 0
4943 u64 alloc_hint;
4944 u64 extent_len;
4945 int i;
4947 * drop the old extent pointer first, then insert the
4948 * new pointers one by one
4950 btrfs_release_path(root, path);
4951 ret = btrfs_drop_extents(trans, root, inode, key.offset,
4952 key.offset + num_bytes,
4953 key.offset, &alloc_hint);
4954 BUG_ON(ret);
4956 for (i = 0; i < nr_extents; i++) {
4957 if (ext_offset >= new_extents[i].num_bytes) {
4958 ext_offset -= new_extents[i].num_bytes;
4959 continue;
4961 extent_len = min(new_extents[i].num_bytes -
4962 ext_offset, num_bytes);
4964 ret = btrfs_insert_empty_item(trans, root,
4965 path, &key,
4966 sizeof(*fi));
4967 BUG_ON(ret);
4969 leaf = path->nodes[0];
4970 fi = btrfs_item_ptr(leaf, path->slots[0],
4971 struct btrfs_file_extent_item);
4972 btrfs_set_file_extent_generation(leaf, fi,
4973 trans->transid);
4974 btrfs_set_file_extent_type(leaf, fi,
4975 BTRFS_FILE_EXTENT_REG);
4976 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4977 new_extents[i].disk_bytenr);
4978 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4979 new_extents[i].disk_num_bytes);
4980 btrfs_set_file_extent_ram_bytes(leaf, fi,
4981 new_extents[i].ram_bytes);
4983 btrfs_set_file_extent_compression(leaf, fi,
4984 new_extents[i].compression);
4985 btrfs_set_file_extent_encryption(leaf, fi,
4986 new_extents[i].encryption);
4987 btrfs_set_file_extent_other_encoding(leaf, fi,
4988 new_extents[i].other_encoding);
4990 btrfs_set_file_extent_num_bytes(leaf, fi,
4991 extent_len);
4992 ext_offset += new_extents[i].offset;
4993 btrfs_set_file_extent_offset(leaf, fi,
4994 ext_offset);
4995 btrfs_mark_buffer_dirty(leaf);
4997 btrfs_drop_extent_cache(inode, key.offset,
4998 key.offset + extent_len - 1, 0);
5000 ret = btrfs_inc_extent_ref(trans, root,
5001 new_extents[i].disk_bytenr,
5002 new_extents[i].disk_num_bytes,
5003 leaf->start,
5004 root->root_key.objectid,
5005 trans->transid, key.objectid);
5006 BUG_ON(ret);
5007 btrfs_release_path(root, path);
5009 inode_add_bytes(inode, extent_len);
5011 ext_offset = 0;
5012 num_bytes -= extent_len;
5013 key.offset += extent_len;
5015 if (num_bytes == 0)
5016 break;
5018 BUG_ON(i >= nr_extents);
5019 #endif
5022 if (extent_locked) {
5023 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5024 lock_end, GFP_NOFS);
5025 extent_locked = 0;
5027 skip:
5028 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
5029 key.offset >= search_end)
5030 break;
5032 cond_resched();
5034 ret = 0;
5035 out:
5036 btrfs_release_path(root, path);
5037 if (inode) {
5038 mutex_unlock(&inode->i_mutex);
5039 if (extent_locked) {
5040 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5041 lock_end, GFP_NOFS);
5043 iput(inode);
5045 return ret;
5048 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
5049 struct btrfs_root *root,
5050 struct extent_buffer *buf, u64 orig_start)
5052 int level;
5053 int ret;
5055 BUG_ON(btrfs_header_generation(buf) != trans->transid);
5056 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
5058 level = btrfs_header_level(buf);
5059 if (level == 0) {
5060 struct btrfs_leaf_ref *ref;
5061 struct btrfs_leaf_ref *orig_ref;
5063 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
5064 if (!orig_ref)
5065 return -ENOENT;
5067 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
5068 if (!ref) {
5069 btrfs_free_leaf_ref(root, orig_ref);
5070 return -ENOMEM;
5073 ref->nritems = orig_ref->nritems;
5074 memcpy(ref->extents, orig_ref->extents,
5075 sizeof(ref->extents[0]) * ref->nritems);
5077 btrfs_free_leaf_ref(root, orig_ref);
5079 ref->root_gen = trans->transid;
5080 ref->bytenr = buf->start;
5081 ref->owner = btrfs_header_owner(buf);
5082 ref->generation = btrfs_header_generation(buf);
5084 ret = btrfs_add_leaf_ref(root, ref, 0);
5085 WARN_ON(ret);
5086 btrfs_free_leaf_ref(root, ref);
5088 return 0;
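/*
 * For every file extent item in 'leaf', drop the cached extent mapping
 * of the matching inode in target_root so that later reads look up the
 * relocated disk location instead of using a stale extent_map.
 */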
5091 static noinline int invalidate_extent_cache(struct btrfs_root *root,
5092 struct extent_buffer *leaf,
5093 struct btrfs_block_group_cache *group,
5094 struct btrfs_root *target_root)
5096 struct btrfs_key key;
5097 struct inode *inode = NULL;
5098 struct btrfs_file_extent_item *fi;
5099 u64 num_bytes;
5100 u64 skip_objectid = 0;
5101 u32 nritems;
5102 u32 i;
5104 nritems = btrfs_header_nritems(leaf);
5105 for (i = 0; i < nritems; i++) {
5106 btrfs_item_key_to_cpu(leaf, &key, i);
5107 if (key.objectid == skip_objectid ||
5108 key.type != BTRFS_EXTENT_DATA_KEY)
5109 continue;
5110 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
5111 if (btrfs_file_extent_type(leaf, fi) ==
5112 BTRFS_FILE_EXTENT_INLINE)
5113 continue;
5114 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
5115 continue;
5116 if (!inode || inode->i_ino != key.objectid) {
5117 iput(inode);
5118 inode = btrfs_ilookup(target_root->fs_info->sb,
5119 key.objectid, target_root, 1);
5121 if (!inode) {
5122 skip_objectid = key.objectid;
5123 continue;
5125 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
5127 lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
5128 key.offset + num_bytes - 1, GFP_NOFS);
5129 btrfs_drop_extent_cache(inode, key.offset,
5130 key.offset + num_bytes - 1, 1);
5131 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
5132 key.offset + num_bytes - 1, GFP_NOFS);
5133 cond_resched();
5135 iput(inode);
5136 return 0;
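/*
 * Rewrite each file extent item in 'leaf' that falls inside the block
 * group being relocated: get_new_locations() supplies the single new
 * disk extent, both the leaf and its cached btrfs_leaf_ref are pointed
 * at it, and the extent references are migrated accordingly.
 */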
5139 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
5140 struct btrfs_root *root,
5141 struct extent_buffer *leaf,
5142 struct btrfs_block_group_cache *group,
5143 struct inode *reloc_inode)
5145 struct btrfs_key key;
5146 struct btrfs_key extent_key;
5147 struct btrfs_file_extent_item *fi;
5148 struct btrfs_leaf_ref *ref;
5149 struct disk_extent *new_extent;
5150 u64 bytenr;
5151 u64 num_bytes;
5152 u32 nritems;
5153 u32 i;
5154 int ext_index;
5155 int nr_extent;
5156 int ret;
5158 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
5159 BUG_ON(!new_extent);
5161 ref = btrfs_lookup_leaf_ref(root, leaf->start);
5162 BUG_ON(!ref);
5164 ext_index = -1;
5165 nritems = btrfs_header_nritems(leaf);
5166 for (i = 0; i < nritems; i++) {
5167 btrfs_item_key_to_cpu(leaf, &key, i);
5168 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
5169 continue;
5170 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
5171 if (btrfs_file_extent_type(leaf, fi) ==
5172 BTRFS_FILE_EXTENT_INLINE)
5173 continue;
5174 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
5175 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
5176 if (bytenr == 0)
5177 continue;
5179 ext_index++;
5180 if (bytenr >= group->key.objectid + group->key.offset ||
5181 bytenr + num_bytes <= group->key.objectid)
5182 continue;
5184 extent_key.objectid = bytenr;
5185 extent_key.offset = num_bytes;
5186 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
5187 nr_extent = 1;
5188 ret = get_new_locations(reloc_inode, &extent_key,
5189 group->key.objectid, 1,
5190 &new_extent, &nr_extent);
5191 if (ret > 0)
5192 continue;
5193 BUG_ON(ret < 0);
5195 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
5196 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
5197 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
5198 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
5200 btrfs_set_file_extent_disk_bytenr(leaf, fi,
5201 new_extent->disk_bytenr);
5202 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
5203 new_extent->disk_num_bytes);
5204 btrfs_mark_buffer_dirty(leaf);
5206 ret = btrfs_inc_extent_ref(trans, root,
5207 new_extent->disk_bytenr,
5208 new_extent->disk_num_bytes,
5209 leaf->start,
5210 root->root_key.objectid,
5211 trans->transid, key.objectid);
5212 BUG_ON(ret);
5213 ret = btrfs_free_extent(trans, root,
5214 bytenr, num_bytes, leaf->start,
5215 btrfs_header_owner(leaf),
5216 btrfs_header_generation(leaf),
5217 key.objectid, 0);
5218 BUG_ON(ret);
5219 cond_resched();
5221 kfree(new_extent);
5222 BUG_ON(ext_index + 1 != ref->nritems);
5223 btrfs_free_leaf_ref(root, ref);
5224 return 0;
5227 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
5228 struct btrfs_root *root)
5230 struct btrfs_root *reloc_root;
5231 int ret;
5233 if (root->reloc_root) {
5234 reloc_root = root->reloc_root;
5235 root->reloc_root = NULL;
5236 list_add(&reloc_root->dead_list,
5237 &root->fs_info->dead_reloc_roots);
5239 btrfs_set_root_bytenr(&reloc_root->root_item,
5240 reloc_root->node->start);
5241 btrfs_set_root_level(&reloc_root->root_item,
5242 btrfs_header_level(reloc_root->node));
5243 memset(&reloc_root->root_item.drop_progress, 0,
5244 sizeof(struct btrfs_disk_key));
5245 reloc_root->root_item.drop_level = 0;
5247 ret = btrfs_update_root(trans, root->fs_info->tree_root,
5248 &reloc_root->root_key,
5249 &reloc_root->root_item);
5250 BUG_ON(ret);
5252 return 0;
5255 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
5257 struct btrfs_trans_handle *trans;
5258 struct btrfs_root *reloc_root;
5259 struct btrfs_root *prev_root = NULL;
5260 struct list_head dead_roots;
5261 int ret;
5262 unsigned long nr;
5264 INIT_LIST_HEAD(&dead_roots);
5265 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
5267 while (!list_empty(&dead_roots)) {
5268 reloc_root = list_entry(dead_roots.prev,
5269 struct btrfs_root, dead_list);
5270 list_del_init(&reloc_root->dead_list);
5272 BUG_ON(reloc_root->commit_root != NULL);
5273 while (1) {
5274 trans = btrfs_join_transaction(root, 1);
5275 BUG_ON(!trans);
5277 mutex_lock(&root->fs_info->drop_mutex);
5278 ret = btrfs_drop_snapshot(trans, reloc_root);
5279 if (ret != -EAGAIN)
5280 break;
5281 mutex_unlock(&root->fs_info->drop_mutex);
5283 nr = trans->blocks_used;
5284 ret = btrfs_end_transaction(trans, root);
5285 BUG_ON(ret);
5286 btrfs_btree_balance_dirty(root, nr);
5289 free_extent_buffer(reloc_root->node);
5291 ret = btrfs_del_root(trans, root->fs_info->tree_root,
5292 &reloc_root->root_key);
5293 BUG_ON(ret);
5294 mutex_unlock(&root->fs_info->drop_mutex);
5296 nr = trans->blocks_used;
5297 ret = btrfs_end_transaction(trans, root);
5298 BUG_ON(ret);
5299 btrfs_btree_balance_dirty(root, nr);
5301 kfree(prev_root);
5302 prev_root = reloc_root;
5304 if (prev_root) {
5305 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
5306 kfree(prev_root);
5308 return 0;
5311 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
5313 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
5314 return 0;
5317 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
5319 struct btrfs_root *reloc_root;
5320 struct btrfs_trans_handle *trans;
5321 struct btrfs_key location;
5322 int found;
5323 int ret;
5325 mutex_lock(&root->fs_info->tree_reloc_mutex);
5326 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
5327 BUG_ON(ret);
5328 found = !list_empty(&root->fs_info->dead_reloc_roots);
5329 mutex_unlock(&root->fs_info->tree_reloc_mutex);
5331 if (found) {
5332 trans = btrfs_start_transaction(root, 1);
5333 BUG_ON(!trans);
5334 ret = btrfs_commit_transaction(trans, root);
5335 BUG_ON(ret);
5338 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
5339 location.offset = (u64)-1;
5340 location.type = BTRFS_ROOT_ITEM_KEY;
5342 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
5343 BUG_ON(!reloc_root);
5344 btrfs_orphan_cleanup(reloc_root);
5345 return 0;
5348 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
5349 struct btrfs_root *root)
5351 struct btrfs_root *reloc_root;
5352 struct extent_buffer *eb;
5353 struct btrfs_root_item *root_item;
5354 struct btrfs_key root_key;
5355 int ret;
5357 BUG_ON(!root->ref_cows);
5358 if (root->reloc_root)
5359 return 0;
5361 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
5362 BUG_ON(!root_item);
5364 ret = btrfs_copy_root(trans, root, root->commit_root,
5365 &eb, BTRFS_TREE_RELOC_OBJECTID);
5366 BUG_ON(ret);
5368 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
5369 root_key.offset = root->root_key.objectid;
5370 root_key.type = BTRFS_ROOT_ITEM_KEY;
5372 memcpy(root_item, &root->root_item, sizeof(*root_item));
5373 btrfs_set_root_refs(root_item, 0);
5374 btrfs_set_root_bytenr(root_item, eb->start);
5375 btrfs_set_root_level(root_item, btrfs_header_level(eb));
5376 btrfs_set_root_generation(root_item, trans->transid);
5378 btrfs_tree_unlock(eb);
5379 free_extent_buffer(eb);
5381 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
5382 &root_key, root_item);
5383 BUG_ON(ret);
5384 kfree(root_item);
5386 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
5387 &root_key);
5388 BUG_ON(!reloc_root);
5389 reloc_root->last_trans = trans->transid;
5390 reloc_root->commit_root = NULL;
5391 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
5393 root->reloc_root = reloc_root;
5394 return 0;
5398 * Core function of space balance.
5400 * The idea is to use reloc trees to relocate tree blocks in reference
5401 * counted roots. There is one reloc tree for each subvol, and all
5402 * reloc trees share the same root key objectid. Reloc trees are snapshots
5403 * of the latest committed roots of subvols (root->commit_root).
5405 * To relocate a tree block referenced by a subvol, there are two steps:
5406 * COW the block through the subvol's reloc tree, then update the block
5407 * pointer in the subvol to point to the new block. Since all reloc trees
5408 * share the same root key objectid, special handling for tree blocks owned
5409 * by them is easy. Once a tree block has been COWed in one reloc tree,
5410 * we can use the resulting new block directly when the same block is
5411 * COWed again through another reloc tree. This way, relocated
5412 * tree blocks are shared between reloc trees, so they are also shared
5413 * between subvols.
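*
* A rough sketch of the two steps as relocate_one_path() below
* implements them (reloc_root is created by init_reloc_tree()):
*
*	btrfs_search_slot(trans, reloc_root, first_key, path, 0, 1);
*		(COW the blocks on the path in the reloc tree)
*	btrfs_merge_path(trans, root, keys, nodes, lowest_level);
*		(swap the relocated blocks into the fs tree)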
5415 static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
5416 struct btrfs_root *root,
5417 struct btrfs_path *path,
5418 struct btrfs_key *first_key,
5419 struct btrfs_ref_path *ref_path,
5420 struct btrfs_block_group_cache *group,
5421 struct inode *reloc_inode)
5423 struct btrfs_root *reloc_root;
5424 struct extent_buffer *eb = NULL;
5425 struct btrfs_key *keys;
5426 u64 *nodes;
5427 int level;
5428 int shared_level;
5429 int lowest_level = 0;
5430 int ret;
5432 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
5433 lowest_level = ref_path->owner_objectid;
5435 if (!root->ref_cows) {
5436 path->lowest_level = lowest_level;
5437 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
5438 BUG_ON(ret < 0);
5439 path->lowest_level = 0;
5440 btrfs_release_path(root, path);
5441 return 0;
5444 mutex_lock(&root->fs_info->tree_reloc_mutex);
5445 ret = init_reloc_tree(trans, root);
5446 BUG_ON(ret);
5447 reloc_root = root->reloc_root;
5449 shared_level = ref_path->shared_level;
5450 ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
5452 keys = ref_path->node_keys;
5453 nodes = ref_path->new_nodes;
5454 memset(&keys[shared_level + 1], 0,
5455 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
5456 memset(&nodes[shared_level + 1], 0,
5457 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
5459 if (nodes[lowest_level] == 0) {
5460 path->lowest_level = lowest_level;
5461 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
5462 0, 1);
5463 BUG_ON(ret);
5464 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
5465 eb = path->nodes[level];
5466 if (!eb || eb == reloc_root->node)
5467 break;
5468 nodes[level] = eb->start;
5469 if (level == 0)
5470 btrfs_item_key_to_cpu(eb, &keys[level], 0);
5471 else
5472 btrfs_node_key_to_cpu(eb, &keys[level], 0);
5474 if (nodes[0] &&
5475 ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5476 eb = path->nodes[0];
5477 ret = replace_extents_in_leaf(trans, reloc_root, eb,
5478 group, reloc_inode);
5479 BUG_ON(ret);
5481 btrfs_release_path(reloc_root, path);
5482 } else {
5483 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
5484 lowest_level);
5485 BUG_ON(ret);
5489 * replace tree blocks in the fs tree with tree blocks in
5490 * the reloc tree.
5492 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
5493 BUG_ON(ret < 0);
5495 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5496 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
5497 0, 0);
5498 BUG_ON(ret);
5499 extent_buffer_get(path->nodes[0]);
5500 eb = path->nodes[0];
5501 btrfs_release_path(reloc_root, path);
5502 ret = invalidate_extent_cache(reloc_root, eb, group, root);
5503 BUG_ON(ret);
5504 free_extent_buffer(eb);
5507 mutex_unlock(&root->fs_info->tree_reloc_mutex);
5508 path->lowest_level = 0;
5509 return 0;
5512 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
5513 struct btrfs_root *root,
5514 struct btrfs_path *path,
5515 struct btrfs_key *first_key,
5516 struct btrfs_ref_path *ref_path)
5518 int ret;
5520 ret = relocate_one_path(trans, root, path, first_key,
5521 ref_path, NULL, NULL);
5522 BUG_ON(ret);
5524 if (root == root->fs_info->extent_root)
5525 btrfs_extent_post_op(trans, root);
5527 return 0;
5530 static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
5531 struct btrfs_root *extent_root,
5532 struct btrfs_path *path,
5533 struct btrfs_key *extent_key)
5535 int ret;
5537 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
5538 if (ret)
5539 goto out;
5540 ret = btrfs_del_item(trans, extent_root, path);
5541 out:
5542 btrfs_release_path(extent_root, path);
5543 return ret;
5546 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
5547 struct btrfs_ref_path *ref_path)
5549 struct btrfs_key root_key;
5551 root_key.objectid = ref_path->root_objectid;
5552 root_key.type = BTRFS_ROOT_ITEM_KEY;
5553 if (is_cowonly_root(ref_path->root_objectid))
5554 root_key.offset = 0;
5555 else
5556 root_key.offset = (u64)-1;
5558 return btrfs_read_fs_root_no_name(fs_info, &root_key);
5561 static noinline int relocate_one_extent(struct btrfs_root *extent_root,
5562 struct btrfs_path *path,
5563 struct btrfs_key *extent_key,
5564 struct btrfs_block_group_cache *group,
5565 struct inode *reloc_inode, int pass)
5567 struct btrfs_trans_handle *trans;
5568 struct btrfs_root *found_root;
5569 struct btrfs_ref_path *ref_path = NULL;
5570 struct disk_extent *new_extents = NULL;
5571 int nr_extents = 0;
5572 int loops;
5573 int ret;
5574 int level;
5575 struct btrfs_key first_key;
5576 u64 prev_block = 0;
5579 trans = btrfs_start_transaction(extent_root, 1);
5580 BUG_ON(!trans);
5582 if (extent_key->objectid == 0) {
5583 ret = del_extent_zero(trans, extent_root, path, extent_key);
5584 goto out;
5587 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
5588 if (!ref_path) {
5589 ret = -ENOMEM;
5590 goto out;
5593 for (loops = 0; ; loops++) {
5594 if (loops == 0) {
5595 ret = btrfs_first_ref_path(trans, extent_root, ref_path,
5596 extent_key->objectid);
5597 } else {
5598 ret = btrfs_next_ref_path(trans, extent_root, ref_path);
5600 if (ret < 0)
5601 goto out;
5602 if (ret > 0)
5603 break;
5605 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
5606 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
5607 continue;
5609 found_root = read_ref_root(extent_root->fs_info, ref_path);
5610 BUG_ON(!found_root);
5612 * for reference counted trees, only process reference paths
5613 * rooted at the latest committed root.
5615 if (found_root->ref_cows &&
5616 ref_path->root_generation != found_root->root_key.offset)
5617 continue;
5619 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5620 if (pass == 0) {
5622 * copy data extents to new locations
5624 u64 group_start = group->key.objectid;
5625 ret = relocate_data_extent(reloc_inode,
5626 extent_key,
5627 group_start);
5628 if (ret < 0)
5629 goto out;
5630 break;
5632 level = 0;
5633 } else {
5634 level = ref_path->owner_objectid;
5637 if (prev_block != ref_path->nodes[level]) {
5638 struct extent_buffer *eb;
5639 u64 block_start = ref_path->nodes[level];
5640 u64 block_size = btrfs_level_size(found_root, level);
5642 eb = read_tree_block(found_root, block_start,
5643 block_size, 0);
5644 btrfs_tree_lock(eb);
5645 BUG_ON(level != btrfs_header_level(eb));
5647 if (level == 0)
5648 btrfs_item_key_to_cpu(eb, &first_key, 0);
5649 else
5650 btrfs_node_key_to_cpu(eb, &first_key, 0);
5652 btrfs_tree_unlock(eb);
5653 free_extent_buffer(eb);
5654 prev_block = block_start;
5657 btrfs_record_root_in_trans(found_root);
5658 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5660 * try to update data extent references while
5661 * keeping metadata shared between snapshots.
5663 if (pass == 1) {
5664 ret = relocate_one_path(trans, found_root,
5665 path, &first_key, ref_path,
5666 group, reloc_inode);
5667 if (ret < 0)
5668 goto out;
5669 continue;
5672 * use the fallback method to process the remaining
5673 * references.
5675 if (!new_extents) {
5676 u64 group_start = group->key.objectid;
5677 new_extents = kmalloc(sizeof(*new_extents),
5678 GFP_NOFS);
5679 nr_extents = 1;
5680 ret = get_new_locations(reloc_inode,
5681 extent_key,
5682 group_start, 1,
5683 &new_extents,
5684 &nr_extents);
5685 if (ret)
5686 goto out;
5688 ret = replace_one_extent(trans, found_root,
5689 path, extent_key,
5690 &first_key, ref_path,
5691 new_extents, nr_extents);
5692 } else {
5693 ret = relocate_tree_block(trans, found_root, path,
5694 &first_key, ref_path);
5696 if (ret < 0)
5697 goto out;
5699 ret = 0;
5700 out:
5701 btrfs_end_transaction(trans, extent_root);
5702 kfree(new_extents);
5703 kfree(ref_path);
5704 return ret;
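/*
 * Pick the allocation profile relocated chunks should use.  Roughly:
 * with a single writable device, RAID0 falls back to single-device
 * chunks and RAID1/RAID10 fall back to DUP; with multiple devices,
 * DUP is upgraded to RAID1 and single-device chunks to RAID0, while
 * existing RAID profiles are kept as they are.
 */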
5707 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
5709 u64 num_devices;
5710 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
5711 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
5713 num_devices = root->fs_info->fs_devices->rw_devices;
5714 if (num_devices == 1) {
5715 stripped |= BTRFS_BLOCK_GROUP_DUP;
5716 stripped = flags & ~stripped;
5718 /* turn raid0 into single device chunks */
5719 if (flags & BTRFS_BLOCK_GROUP_RAID0)
5720 return stripped;
5722 /* turn mirroring into duplication */
5723 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
5724 BTRFS_BLOCK_GROUP_RAID10))
5725 return stripped | BTRFS_BLOCK_GROUP_DUP;
5726 return flags;
5727 } else {
5728 /* they already had raid on here, just return */
5729 if (flags & stripped)
5730 return flags;
5732 stripped |= BTRFS_BLOCK_GROUP_DUP;
5733 stripped = flags & ~stripped;
5735 /* switch duplicated blocks with raid1 */
5736 if (flags & BTRFS_BLOCK_GROUP_DUP)
5737 return stripped | BTRFS_BLOCK_GROUP_RAID1;
5739 /* turn single device chunks into raid0 */
5740 return stripped | BTRFS_BLOCK_GROUP_RAID0;
5742 return flags;
5745 static int __alloc_chunk_for_shrink(struct btrfs_root *root,
5746 struct btrfs_block_group_cache *shrink_block_group,
5747 int force)
5749 struct btrfs_trans_handle *trans;
5750 u64 new_alloc_flags;
5751 u64 calc;
5753 spin_lock(&shrink_block_group->lock);
5754 if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
5755 spin_unlock(&shrink_block_group->lock);
5757 trans = btrfs_start_transaction(root, 1);
5758 spin_lock(&shrink_block_group->lock);
5760 new_alloc_flags = update_block_group_flags(root,
5761 shrink_block_group->flags);
5762 if (new_alloc_flags != shrink_block_group->flags) {
5763 calc =
5764 btrfs_block_group_used(&shrink_block_group->item);
5765 } else {
5766 calc = shrink_block_group->key.offset;
5768 spin_unlock(&shrink_block_group->lock);
5770 do_chunk_alloc(trans, root->fs_info->extent_root,
5771 calc + 2 * 1024 * 1024, new_alloc_flags, force);
5773 btrfs_end_transaction(trans, root);
5774 } else
5775 spin_unlock(&shrink_block_group->lock);
5776 return 0;
5779 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
5780 struct btrfs_root *root,
5781 u64 objectid, u64 size)
5783 struct btrfs_path *path;
5784 struct btrfs_inode_item *item;
5785 struct extent_buffer *leaf;
5786 int ret;
5788 path = btrfs_alloc_path();
5789 if (!path)
5790 return -ENOMEM;
5792 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
5793 if (ret)
5794 goto out;
5796 leaf = path->nodes[0];
5797 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
5798 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
5799 btrfs_set_inode_generation(leaf, item, 1);
5800 btrfs_set_inode_size(leaf, item, size);
5801 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
5802 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
5803 btrfs_mark_buffer_dirty(leaf);
5804 btrfs_release_path(root, path);
5805 out:
5806 btrfs_free_path(path);
5807 return ret;
5810 static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
5811 struct btrfs_block_group_cache *group)
5813 struct inode *inode = NULL;
5814 struct btrfs_trans_handle *trans;
5815 struct btrfs_root *root;
5816 struct btrfs_key root_key;
5817 u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
5818 int err = 0;
5820 root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
5821 root_key.type = BTRFS_ROOT_ITEM_KEY;
5822 root_key.offset = (u64)-1;
5823 root = btrfs_read_fs_root_no_name(fs_info, &root_key);
5824 if (IS_ERR(root))
5825 return ERR_CAST(root);
5827 trans = btrfs_start_transaction(root, 1);
5828 BUG_ON(!trans);
5830 err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
5831 if (err)
5832 goto out;
5834 err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
5835 BUG_ON(err);
5837 err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
5838 group->key.offset, 0, group->key.offset,
5839 0, 0, 0);
5840 BUG_ON(err);
5842 inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
5843 if (inode->i_state & I_NEW) {
5844 BTRFS_I(inode)->root = root;
5845 BTRFS_I(inode)->location.objectid = objectid;
5846 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
5847 BTRFS_I(inode)->location.offset = 0;
5848 btrfs_read_locked_inode(inode);
5849 unlock_new_inode(inode);
5850 BUG_ON(is_bad_inode(inode));
5851 } else {
5852 BUG_ON(1);
5854 BTRFS_I(inode)->index_cnt = group->key.objectid;
5856 err = btrfs_orphan_add(trans, inode);
5857 out:
5858 btrfs_end_transaction(trans, root);
5859 if (err) {
5860 if (inode)
5861 iput(inode);
5862 inode = ERR_PTR(err);
5864 return inode;
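/*
 * Copy the checksums of a relocated data range over to its new
 * location.  The old disk bytenr is file_pos + index_cnt (index_cnt
 * caches the start of the block group being relocated), the sums are
 * looked up in the csum root, and each btrfs_sector_sum is rebased by
 * (ordered->start - disk_bytenr) before being queued on the ordered
 * extent.
 */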
5867 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
5870 struct btrfs_ordered_sum *sums;
5871 struct btrfs_sector_sum *sector_sum;
5872 struct btrfs_ordered_extent *ordered;
5873 struct btrfs_root *root = BTRFS_I(inode)->root;
5874 struct list_head list;
5875 size_t offset;
5876 int ret;
5877 u64 disk_bytenr;
5879 INIT_LIST_HEAD(&list);
5881 ordered = btrfs_lookup_ordered_extent(inode, file_pos);
5882 BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
5884 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
5885 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
5886 disk_bytenr + len - 1, &list);
5888 while (!list_empty(&list)) {
5889 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
5890 list_del_init(&sums->list);
5892 sector_sum = sums->sums;
5893 sums->bytenr = ordered->start;
5895 offset = 0;
5896 while (offset < sums->len) {
5897 sector_sum->bytenr += ordered->start - disk_bytenr;
5898 sector_sum++;
5899 offset += root->sectorsize;
5902 btrfs_add_ordered_sum(inode, ordered, sums);
5904 btrfs_put_ordered_extent(ordered);
5905 return 0;
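/*
 * Relocate every extent in the block group starting at group_start.
 * The group is made read-only, the extent tree is scanned, and each
 * extent is handed to relocate_one_extent().  Pass 0 copies the data
 * into the relocation inode; later passes update the remaining
 * references, repeating until a scan finds nothing left to move.
 */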
5908 int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
5910 struct btrfs_trans_handle *trans;
5911 struct btrfs_path *path;
5912 struct btrfs_fs_info *info = root->fs_info;
5913 struct extent_buffer *leaf;
5914 struct inode *reloc_inode;
5915 struct btrfs_block_group_cache *block_group;
5916 struct btrfs_key key;
5917 u64 skipped;
5918 u64 cur_byte;
5919 u64 total_found;
5920 u32 nritems;
5921 int ret;
5922 int progress;
5923 int pass = 0;
5925 root = root->fs_info->extent_root;
5927 block_group = btrfs_lookup_block_group(info, group_start);
5928 BUG_ON(!block_group);
5930 printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
5931 (unsigned long long)block_group->key.objectid,
5932 (unsigned long long)block_group->flags);
5934 path = btrfs_alloc_path();
5935 BUG_ON(!path);
5937 reloc_inode = create_reloc_inode(info, block_group);
5938 BUG_ON(IS_ERR(reloc_inode));
5940 __alloc_chunk_for_shrink(root, block_group, 1);
5941 set_block_group_readonly(block_group);
5943 btrfs_start_delalloc_inodes(info->tree_root);
5944 btrfs_wait_ordered_extents(info->tree_root, 0);
5945 again:
5946 skipped = 0;
5947 total_found = 0;
5948 progress = 0;
5949 key.objectid = block_group->key.objectid;
5950 key.offset = 0;
5951 key.type = 0;
5952 cur_byte = key.objectid;
5954 trans = btrfs_start_transaction(info->tree_root, 1);
5955 btrfs_commit_transaction(trans, info->tree_root);
5957 mutex_lock(&root->fs_info->cleaner_mutex);
5958 btrfs_clean_old_snapshots(info->tree_root);
5959 btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
5960 mutex_unlock(&root->fs_info->cleaner_mutex);
5962 while (1) {
5963 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5964 if (ret < 0)
5965 goto out;
5966 next:
5967 leaf = path->nodes[0];
5968 nritems = btrfs_header_nritems(leaf);
5969 if (path->slots[0] >= nritems) {
5970 ret = btrfs_next_leaf(root, path);
5971 if (ret < 0)
5972 goto out;
5973 if (ret == 1) {
5974 ret = 0;
5975 break;
5977 leaf = path->nodes[0];
5978 nritems = btrfs_header_nritems(leaf);
5981 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5983 if (key.objectid >= block_group->key.objectid +
5984 block_group->key.offset)
5985 break;
5987 if (progress && need_resched()) {
5988 btrfs_release_path(root, path);
5989 cond_resched();
5990 progress = 0;
5991 continue;
5993 progress = 1;
5995 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
5996 key.objectid + key.offset <= cur_byte) {
5997 path->slots[0]++;
5998 goto next;
6001 total_found++;
6002 cur_byte = key.objectid + key.offset;
6003 btrfs_release_path(root, path);
6005 __alloc_chunk_for_shrink(root, block_group, 0);
6006 ret = relocate_one_extent(root, path, &key, block_group,
6007 reloc_inode, pass);
6008 BUG_ON(ret < 0);
6009 if (ret > 0)
6010 skipped++;
6012 key.objectid = cur_byte;
6013 key.type = 0;
6014 key.offset = 0;
6017 btrfs_release_path(root, path);
6019 if (pass == 0) {
6020 btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
6021 invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
6024 if (total_found > 0) {
6025 printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
6026 (unsigned long long)total_found, pass);
6027 pass++;
6028 if (total_found == skipped && pass > 2) {
6029 iput(reloc_inode);
6030 reloc_inode = create_reloc_inode(info, block_group);
6031 pass = 0;
6033 goto again;
6036 /* delete reloc_inode */
6037 iput(reloc_inode);
6039 /* unpin extents in this range */
6040 trans = btrfs_start_transaction(info->tree_root, 1);
6041 btrfs_commit_transaction(trans, info->tree_root);
6043 spin_lock(&block_group->lock);
6044 WARN_ON(block_group->pinned > 0);
6045 WARN_ON(block_group->reserved > 0);
6046 WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
6047 spin_unlock(&block_group->lock);
6048 put_block_group(block_group);
6049 ret = 0;
6050 out:
6051 btrfs_free_path(path);
6052 return ret;
6055 static int find_first_block_group(struct btrfs_root *root,
6056 struct btrfs_path *path, struct btrfs_key *key)
6058 int ret = 0;
6059 struct btrfs_key found_key;
6060 struct extent_buffer *leaf;
6061 int slot;
6063 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
6064 if (ret < 0)
6065 goto out;
6067 while (1) {
6068 slot = path->slots[0];
6069 leaf = path->nodes[0];
6070 if (slot >= btrfs_header_nritems(leaf)) {
6071 ret = btrfs_next_leaf(root, path);
6072 if (ret == 0)
6073 continue;
6074 if (ret < 0)
6075 goto out;
6076 break;
6078 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6080 if (found_key.objectid >= key->objectid &&
6081 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
6082 ret = 0;
6083 goto out;
6085 path->slots[0]++;
6087 ret = -ENOENT;
6088 out:
6089 return ret;
6092 int btrfs_free_block_groups(struct btrfs_fs_info *info)
6094 struct btrfs_block_group_cache *block_group;
6095 struct rb_node *n;
6097 spin_lock(&info->block_group_cache_lock);
6098 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
6099 block_group = rb_entry(n, struct btrfs_block_group_cache,
6100 cache_node);
6101 rb_erase(&block_group->cache_node,
6102 &info->block_group_cache_tree);
6103 spin_unlock(&info->block_group_cache_lock);
6105 btrfs_remove_free_space_cache(block_group);
6106 down_write(&block_group->space_info->groups_sem);
6107 list_del(&block_group->list);
6108 up_write(&block_group->space_info->groups_sem);
6110 WARN_ON(atomic_read(&block_group->count) != 1);
6111 kfree(block_group);
6113 spin_lock(&info->block_group_cache_lock);
6115 spin_unlock(&info->block_group_cache_lock);
6116 return 0;
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0) {
			ret = 0;
			goto error;
		}
		if (ret != 0)
			goto error;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			break;
		}

		atomic_set(&cache->count, 1);
		spin_lock_init(&cache->lock);
		mutex_init(&cache->alloc_mutex);
		mutex_init(&cache->cache_mutex);
		INIT_LIST_HEAD(&cache->list);
		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(root, path);
		cache->flags = btrfs_block_group_flags(&cache->item);

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		BUG_ON(ret);
		cache->space_info = space_info;
		down_write(&space_info->groups_sem);
		list_add_tail(&cache->list, &space_info->block_groups);
		up_write(&space_info->groups_sem);

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		BUG_ON(ret);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_readonly(cache);
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
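
/*
 * Create a new block group to cover a freshly allocated chunk.  A rough
 * sketch of how chunk allocation is expected to call this (the exact
 * caller wiring lives in do_chunk_alloc() and may differ):
 *
 *	ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes,
 *				flags);
 *	if (!ret)
 *		ret = btrfs_make_block_group(trans, extent_root, 0, flags,
 *				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
 *				start, num_bytes);
 */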
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_new_blockgroup = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	mutex_init(&cache->alloc_mutex);
	mutex_init(&cache->cache_mutex);
	INIT_LIST_HEAD(&cache->list);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret);
	down_write(&cache->space_info->groups_sem);
	list_add_tail(&cache->list, &cache->space_info->block_groups);
	up_write(&cache->space_info->groups_sem);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	BUG_ON(ret);

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	BUG_ON(ret);

	finish_current_insert(trans, extent_root, 0);
	ret = del_pending_extents(trans, extent_root, 0);
	BUG_ON(ret);
	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}
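
/*
 * Tear down an empty, read-only block group: unhook it from the cache
 * rb tree, the free space cache and its space_info, then delete the
 * BLOCK_GROUP_ITEM from the extent tree.  The caller must already hold
 * a transaction and have marked the group read-only.
 */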
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_key key;
	int ret;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	memcpy(&key, &block_group->key, sizeof(key));

	path = btrfs_alloc_path();
	BUG_ON(!path);

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	spin_unlock(&root->fs_info->block_group_cache_lock);
	btrfs_remove_free_space_cache(block_group);
	down_write(&block_group->space_info->groups_sem);
	list_del(&block_group->list);
	up_write(&block_group->space_info->groups_sem);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	spin_unlock(&block_group->space_info->lock);
	block_group->space_info->full = 0;

	/*
	 * Drop both references: the one taken by the lookup above and
	 * the one the block group cache rb tree held until rb_erase.
	 * The group is freed once the count reaches zero.
	 */
	put_block_group(block_group);
	put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}
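
/*
 * Expected removal sequence, as a hedged sketch modeled on chunk
 * relocation (the precise steps in btrfs_relocate_chunk() are elided
 * and may differ):
 *
 *	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
 *	trans = btrfs_start_transaction(root, 1);
 *	[ ... remove the chunk mappings and device extents ... ]
 *	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
 *	btrfs_end_transaction(trans, root);
 */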