more return code checking
[btrfs-progs-unstable.git] / extent-tree.c
blob 074f4b182f169df358a81727797b820fbeb28014
#include <stdio.h>
#include <stdlib.h>
#include "kerncompat.h"
#include "radix-tree.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
/*
 * pending extents are blocks that we're trying to allocate in the extent
 * map while trying to grow the map because of other allocations. To avoid
 * recursing, they are tagged in the radix tree and cleaned up after
 * other allocations are done. The pending tag is also used in the same
 * manner for deletes.
 */
#define CTREE_EXTENT_PENDING 0
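
/*
 * Rough lifecycle of the tag: free_extent() and alloc_extent() set
 * CTREE_EXTENT_PENDING on a block when the root being modified is the
 * extent root itself, instead of touching the extent tree right away.
 * Once the outer operation finishes, del_pending_extents() or
 * insert_pending_extents() walks the tagged blocks, applies the deferred
 * delete or insert, clears the tag and drops the buffer reference.
 */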

/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 */
static int del_pending_extents(struct ctree_root *extent_root)
{
	int ret;
	struct key key;
	struct tree_buffer *gang[4];
	int i;
	struct ctree_path path;

	while(1) {
		ret = radix_tree_gang_lookup_tag(&extent_root->cache_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 CTREE_EXTENT_PENDING);
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			key.objectid = gang[i]->blocknr;
			key.flags = 0;
			key.offset = 1;
			init_path(&path);
			ret = search_slot(extent_root, &key, &path, 0);
			if (ret) {
				print_tree(extent_root, extent_root->node);
				printf("unable to find %Lu\n", key.objectid);
				BUG();
				// FIXME undo it and return sane
				return ret;
			}
			ret = del_item(extent_root, &path);
			if (ret) {
				BUG();
				return ret;
			}
			release_path(extent_root, &path);
			radix_tree_tag_clear(&extent_root->cache_radix,
					     gang[i]->blocknr,
					     CTREE_EXTENT_PENDING);
			tree_block_release(extent_root, gang[i]);
		}
	}
	return 0;
}
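
/*
 * Note that the delete itself may free more extent tree blocks (for
 * example when a leaf empties out); those are only tagged pending, so
 * the while(1) loop above keeps re-running the gang lookup until no
 * tagged blocks remain.
 */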

/*
 * remove an extent from the root, returns 0 on success
 */
int free_extent(struct ctree_root *root, u64 blocknr, u64 num_blocks)
{
	struct ctree_path path;
	struct key key;
	struct ctree_root *extent_root = root->extent_root;
	struct tree_buffer *t;
	int pending_ret;
	int ret;

	key.objectid = blocknr;
	key.flags = 0;
	key.offset = num_blocks;
	if (root == extent_root) {
		t = read_tree_block(root, key.objectid);
		radix_tree_tag_set(&root->cache_radix, key.objectid,
				   CTREE_EXTENT_PENDING);
		return 0;
	}
	init_path(&path);
	ret = search_slot(extent_root, &key, &path, 0);
	if (ret) {
		print_tree(extent_root, extent_root->node);
		printf("failed to find %Lu\n", key.objectid);
		BUG();
	}
	ret = del_item(extent_root, &path);
	if (ret)
		BUG();
	release_path(extent_root, &path);
	pending_ret = del_pending_extents(root->extent_root);
	return ret ? ret : pending_ret;
}
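
/*
 * When root is the extent tree itself, the block is only tagged pending
 * above; the reference taken by read_tree_block() keeps the buffer cached
 * until del_pending_extents() drops it with tree_block_release().  A
 * typical caller frees a single tree block, e.g.:
 *
 *	ret = free_extent(root, buf->blocknr, 1);
 */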

/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = 0
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static int find_free_extent(struct ctree_root *orig_root, u64 num_blocks,
			    u64 search_start, u64 search_end, struct key *ins)
{
	struct ctree_path path;
	struct key *key;
	int ret;
	u64 hole_size = 0;
	int slot = 0;
	u64 last_block;
	int start_found;
	struct leaf *l;
	struct ctree_root *root = orig_root->extent_root;

check_failed:
	init_path(&path);
	ins->objectid = search_start;
	ins->offset = 0;
	ins->flags = 0;
	start_found = 0;
	ret = search_slot(root, ins, &path, 0);
	if (ret < 0)
		goto error;

	while (1) {
		l = &path.nodes[0]->leaf;
		slot = path.slots[0];
		if (slot >= l->header.nritems) {
			ret = next_leaf(root, &path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			if (!start_found) {
				ins->objectid = search_start;
				ins->offset = num_blocks;
				start_found = 1;
				goto check_pending;
			}
			ins->objectid = last_block > search_start ?
					last_block : search_start;
			ins->offset = num_blocks;
			goto check_pending;
		}
		key = &l->items[slot].key;
		if (key->objectid >= search_start) {
			if (start_found) {
				hole_size = key->objectid - last_block;
				if (hole_size > num_blocks) {
					ins->objectid = last_block;
					ins->offset = num_blocks;
					goto check_pending;
				}
			} else
				start_found = 1;
			last_block = key->objectid + key->offset;
		}
		path.slots[0]++;
	}
	// FIXME -ENOSPC
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	release_path(root, &path);
	BUG_ON(ins->objectid < search_start);
	if (orig_root->extent_root == orig_root) {
		BUG_ON(num_blocks != 1);
		if ((root->current_insert.objectid <= ins->objectid &&
		     root->current_insert.objectid +
		     root->current_insert.offset > ins->objectid) ||
		    (root->current_insert.objectid > ins->objectid &&
		     root->current_insert.objectid <= ins->objectid +
		     ins->offset) ||
		    radix_tree_tag_get(&root->cache_radix, ins->objectid,
				       CTREE_EXTENT_PENDING)) {
			search_start = ins->objectid + 1;
			goto check_failed;
		}
	}
	if (ins->offset != 1)
		BUG();
	return 0;
error:
	release_path(root, &path);
	return ret;
}
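
/*
 * For example: with extent items (objectid=0, offset=4) and
 * (objectid=6, offset=4) in the tree, a search for num_blocks=1 starting
 * at search_start=0 records last_block=4 at the first item, sees the
 * two-block hole in front of the second item, and returns
 * ins->objectid=4, ins->offset=1.
 */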

/*
 * insert all of the pending extents reserved during the original
 * allocation. (CTREE_EXTENT_PENDING). Returns zero if it all worked out
 */
static int insert_pending_extents(struct ctree_root *extent_root)
{
	int ret;
	struct key key;
	struct extent_item item;
	struct tree_buffer *gang[4];
	int i;

	// FIXME -ENOSPC
	item.refs = 1;
	item.owner = extent_root->node->node.header.parentid;
	while(1) {
		ret = radix_tree_gang_lookup_tag(&extent_root->cache_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 CTREE_EXTENT_PENDING);
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			key.objectid = gang[i]->blocknr;
			key.flags = 0;
			key.offset = 1;
			ret = insert_item(extent_root, &key, &item,
					  sizeof(item));
			if (ret) {
				BUG();
				// FIXME undo it and return sane
				return ret;
			}
			radix_tree_tag_clear(&extent_root->cache_radix,
					     gang[i]->blocknr,
					     CTREE_EXTENT_PENDING);
			tree_block_release(extent_root, gang[i]);
		}
	}
	return 0;
}
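
/*
 * As with deletes, inserting into the extent tree can allocate new tree
 * blocks, which are only tagged pending; the while(1) loop above picks
 * them up on its next pass.
 */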

/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
int alloc_extent(struct ctree_root *root, u64 num_blocks, u64 search_start,
		 u64 search_end, u64 owner, struct key *ins,
		 struct tree_buffer **buf)
{
	int ret;
	int pending_ret;
	struct extent_item extent_item;

	extent_item.refs = 1;
	extent_item.owner = owner;

	ret = find_free_extent(root, num_blocks, search_start, search_end, ins);
	if (ret)
		return ret;

	if (root != root->extent_root) {
		memcpy(&root->extent_root->current_insert, ins, sizeof(*ins));
		ret = insert_item(root->extent_root, ins, &extent_item,
				  sizeof(extent_item));
		memset(&root->extent_root->current_insert, 0,
		       sizeof(struct key));
		pending_ret = insert_pending_extents(root->extent_root);
		if (ret)
			return ret;
		if (pending_ret)
			return pending_ret;
		*buf = find_tree_block(root, ins->objectid);
		return 0;
	}
	/* we're allocating an extent for the extent tree, don't recurse */
	BUG_ON(ins->offset != 1);
	*buf = find_tree_block(root, ins->objectid);
	BUG_ON(!*buf);
	radix_tree_tag_set(&root->cache_radix, ins->objectid,
			   CTREE_EXTENT_PENDING);
	(*buf)->count++;
	return 0;
}
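
/*
 * Two cases above: for normal roots the extent item is inserted right
 * away; current_insert guards the freshly reserved range so that
 * allocations triggered from inside insert_item() cannot hand it out
 * again, and insert_pending_extents() then flushes anything those
 * allocations tagged.  For the extent root itself the block is only
 * tagged CTREE_EXTENT_PENDING and held with an extra reference;
 * insert_pending_extents() adds the extent item later.
 */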

/*
 * helper function to allocate a block for a given tree
 * returns the tree buffer or NULL.
 */
struct tree_buffer *alloc_free_block(struct ctree_root *root)
{
	struct key ins;
	int ret;
	struct tree_buffer *buf = NULL;

	ret = alloc_extent(root, 1, 0, (unsigned long)-1,
			   root->node->node.header.parentid,
			   &ins, &buf);
	if (ret) {
		BUG();
		return NULL;
	}
	if (root != root->extent_root)
		BUG_ON(radix_tree_tag_get(&root->extent_root->cache_radix,
					  buf->blocknr, CTREE_EXTENT_PENDING));
	return buf;
}
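
/*
 * A caller grabbing a fresh block for the tree might do, roughly:
 *
 *	struct tree_buffer *buf = alloc_free_block(root);
 *	if (!buf)
 *		return -1;
 *	... initialize buf->node and link it into the tree ...
 */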