Add support for multiple devices per filesystem
[btrfs-progs-unstable/devel.git] / volumes.c
blob 2fb5a2005cc00ff82edf50e6ceaae42347046feb
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
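
/*
 * map_lookup ties a logical byte range (the embedded cache_extent,
 * keyed by logical start and size) to the single device and physical
 * offset backing it; with one stripe per chunk, one entry fully
 * describes a chunk.
 */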
struct map_lookup {
	struct cache_extent ce;
	struct btrfs_device *dev;
	u64 physical;
};
/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
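/*
 * worked example (hypothetical numbers): with dev extents at
 * [0, 16M) and [32M, 48M) and num_bytes = 8M, the walk below sets
 * last_byte = 16M after the first extent and returns *start = 16M,
 * since the hole [16M, 32M) is at least 8M wide.
 */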
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;
	path->reada = 2;

	/* FIXME use last free of some kind */

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
			goto next;
		}

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes >= search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}
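
/*
 * reserve a free region on 'device' and record it as a DEV_EXTENT
 * item, owned by 'owner' (the chunk's logical start in the caller
 * below).
 */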
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 owner, u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret)
		goto err;

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_owner(leaf, extent, owner);
	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}
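
/*
 * chunk items are keyed by logical start, so the next free logical
 * address is the last item's objectid plus its length (key.offset),
 * or 0 for an empty chunk tree.
 */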
static int find_next_chunk(struct btrfs_root *root, u64 *objectid)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = (u64)-1;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*objectid = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.objectid + found_key.offset;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
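
/*
 * round-robin rotor over the device list: step one entry past 'last',
 * skipping the list head, so successive allocations rotate across
 * devices.
 */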
static struct btrfs_device *next_device(struct list_head *head,
					struct list_head *last)
{
	struct list_head *next = last->next;
	struct btrfs_device *dev;

	if (list_empty(head))
		return NULL;

	if (next == head)
		next = next->next;

	dev = list_entry(next, struct btrfs_device, dev_list);
	return dev;
}
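
/*
 * the next devid is one past the highest DEV_ITEM offset already in
 * the tree, starting at 1 when no device items exist yet.
 */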
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}
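
/*
 * note on devids: the item key below is reserved via find_next_devid,
 * while the devid written into the item body comes from the
 * caller-filled struct; callers presumably keep the two in sync.
 */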
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item) + device->name_len);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_rdev(leaf, dev_item, device->rdev);
	btrfs_set_device_partition(leaf, dev_item, device->partition);
	btrfs_set_device_name_len(leaf, dev_item, device->name_len);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);

	ptr = (unsigned long)btrfs_device_name(dev_item);
	write_extent_buffer(leaf, device->name, ptr, device->name_len);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
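
/*
 * rewrite the DEV_ITEM for an existing device so on-disk fields such
 * as bytes_used track the in-memory struct.
 */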
int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_rdev(leaf, dev_item, device->rdev);
	btrfs_set_device_partition(leaf, dev_item, device->partition);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
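
/*
 * chunks in the system block group are also duplicated into the
 * superblock's sys_chunk_array so the chunk tree can be read before
 * any mapping exists.  The array is a packed sequence of
 * (btrfs_disk_key, btrfs_chunk + inline stripes) pairs.
 */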
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	/* the key is stored ahead of the chunk, so account for it too */
	if (array_size + item_size + sizeof(disk_key) >
	    BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
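
/*
 * chunks are currently one stripe on one device.  The sizing policy
 * below takes roughly 5% of the device (total_bytes minus 95% of
 * total_bytes) and rounds it down to an io_align boundary; despite
 * its name, 'mask' is used as a granularity (divide, then multiply),
 * not a bitmask.
 */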
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u32 type)
{
	u64 dev_offset;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head *dev_list = &extent_root->fs_info->devices;
	struct list_head *last_dev = extent_root->fs_info->last_device;
	struct map_lookup *map;
	u64 physical;
	u64 calc_size;
	int num_stripes;
	int ret;
	int index = 0;
	struct btrfs_key key;

	ret = find_next_chunk(chunk_root, &key.objectid);
	if (ret)
		return ret;

	num_stripes = 1;
	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	stripes = &chunk->stripe;

	while (index < num_stripes) {
		device = next_device(dev_list, last_dev);
		BUG_ON(!device);
		last_dev = &device->dev_list;
		extent_root->fs_info->last_device = last_dev;

		if (index == 0) {
			int mask = device->io_align;
			calc_size = (device->total_bytes * 95) / 100;
			calc_size = device->total_bytes - calc_size;
			calc_size = (calc_size / mask) * mask;
			*num_bytes = calc_size;
		}

		ret = btrfs_alloc_dev_extent(trans, device,
					     key.objectid,
					     calc_size, &dev_offset);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		btrfs_set_stack_stripe_devid(stripes + index, device->devid);
		btrfs_set_stack_stripe_offset(stripes + index, dev_offset);
		physical = dev_offset;
		index++;
	}

	/* key.objectid was set above */
	key.offset = *num_bytes;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, 64 * 1024);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_io_width(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.objectid;

	map = kmalloc(sizeof(*map), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	map->ce.start = key.objectid;
	map->ce.size = key.offset;

	map->physical = physical;
	map->dev = device;

	if (!map->dev) {
		kfree(map);
		kfree(chunk);
		return -EIO;
	}
	ret = insert_existing_cache_extent(
			&extent_root->fs_info->mapping_tree.cache_tree,
			&map->ce);
	BUG_ON(ret);

	kfree(chunk);
	return ret;
}
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	cache_tree_init(&tree->cache_tree);
}
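
/*
 * translate a logical byte number into the physical byte number and
 * backing device.  A hypothetical caller (fs_info, logical and len
 * are assumed names, not part of this file) might do:
 *
 *	u64 phys;
 *	u64 length;
 *	struct btrfs_device *dev;
 *
 *	btrfs_map_block(&fs_info->mapping_tree, logical,
 *			&phys, &length, &dev);
 *	... then read min(len, length) bytes at offset phys from dev->fd
 */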
int btrfs_map_block(struct btrfs_mapping_tree *map_tree,
		    u64 logical, u64 *phys, u64 *length,
		    struct btrfs_device **dev)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 offset;

	ce = find_first_cache_extent(&map_tree->cache_tree, logical);
	BUG_ON(!ce);
	BUG_ON(ce->start > logical || ce->start + ce->size < logical);
	map = container_of(ce, struct map_lookup, ce);
	offset = logical - ce->start;
	*phys = map->physical + offset;
	*length = ce->size - offset;
	*dev = map->dev;
	return 0;
}
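
/*
 * linear scan of the fs_info->devices list by devid; fine while a
 * filesystem has only a handful of devices.
 */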
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid)
{
	struct btrfs_device *dev;
	struct list_head *cur = root->fs_info->devices.next;
	struct list_head *head = &root->fs_info->devices;

	while (cur != head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid)
			return dev;
		cur = cur->next;
	}
	return NULL;
}
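
/*
 * turn an on-disk chunk item into a map_lookup entry in the mapping
 * tree.  Only stripe 0 is used, matching the single-stripe chunks
 * btrfs_alloc_chunk creates above.
 */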
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct cache_extent *ce;
	u64 logical;
	u64 length;
	u64 devid;
	int ret;

	logical = key->objectid;
	length = key->offset;
	ce = find_first_cache_extent(&map_tree->cache_tree, logical);

	/* already mapped? */
	if (ce && ce->start <= logical && ce->start + ce->size > logical) {
		return 0;
	}

	map = kmalloc(sizeof(*map), GFP_NOFS);
	if (!map)
		return -ENOMEM;

	map->ce.start = logical;
	map->ce.size = length;

	map->physical = btrfs_stripe_offset_nr(leaf, chunk, 0);
	devid = btrfs_stripe_devid_nr(leaf, chunk, 0);
	map->dev = btrfs_find_device(root, devid);

	if (!map->dev) {
		kfree(map);
		return -EIO;
	}
	ret = insert_existing_cache_extent(&map_tree->cache_tree, &map->ce);
	BUG_ON(ret);

	return 0;
}
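
/*
 * copy a DEV_ITEM's fields into the in-memory device, duplicating the
 * inline name into a freshly allocated, NUL-terminated buffer.
 */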
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;
	char *name;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	device->rdev = btrfs_device_rdev(leaf, dev_item);
	device->partition = btrfs_device_partition(leaf, dev_item);
	device->name_len = btrfs_device_name_len(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE);

	name = kmalloc(device->name_len + 1, GFP_NOFS);
	if (!name)
		return -ENOMEM;
	device->name = name;
	ptr = (unsigned long)btrfs_device_name(dev_item);
	read_extent_buffer(leaf, name, ptr, device->name_len);
	name[device->name_len] = '\0';
	return 0;
}
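
/*
 * instantiate and open a device from its DEV_ITEM unless a device
 * with the same devid is already on the list.
 */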
static int read_one_dev(struct btrfs_root *root, struct btrfs_key *key,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;

	devid = btrfs_device_id(leaf, dev_item);
	if (btrfs_find_device(root, devid))
		return 0;

	device = kmalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return -ENOMEM;

	ret = fill_device_from_item(leaf, dev_item, device);
	if (ret) {
		kfree(device);
		return ret;
	}
	device->dev_root = root->fs_info->dev_root;
	device->fd = 0;
	list_add(&device->dev_list, &root->fs_info->devices);
	memcpy(&device->dev_key, key, sizeof(*key));

	ret = btrfs_open_device(device);
	if (ret) {
		/* unhook before freeing so the list stays consistent */
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	return ret;
}
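
/*
 * parse the packed sys_chunk_array from the superblock copy: each
 * iteration consumes a disk key, then either a dev item (plus its
 * inline name) or a chunk item sized by its stripe count.
 */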
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb = root->fs_info->sb_buffer;
	struct btrfs_disk_key *disk_key;
	struct btrfs_dev_item *dev_item;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u8 *ptr;
	unsigned long sb_ptr;
	u32 cur;
	int ret;
	int dev_only = 1;

	array_size = btrfs_super_sys_array_size(super_copy);

	/*
	 * we do this loop twice, once for the device items and
	 * once for all of the chunks.  This way there are device
	 * structs filled in for every chunk
	 */
again:
	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID &&
		    key.type == BTRFS_DEV_ITEM_KEY) {
			dev_item = (struct btrfs_dev_item *)sb_ptr;
			if (dev_only) {
				ret = read_one_dev(root, &key, sb, dev_item);
				BUG_ON(ret);
			}
			len = sizeof(*dev_item);
			len += btrfs_device_name_len(sb, dev_item);
		} else if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			if (!dev_only) {
				ret = read_one_chunk(root, &key, sb, chunk);
				BUG_ON(ret);
			}
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			BUG();
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	if (dev_only == 1) {
		dev_only = 0;
		goto again;
	}
	return 0;
}
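
/*
 * read every DEV_ITEM and CHUNK_ITEM out of the chunk tree at mount
 * time, devices first so the chunk mappings can resolve their devids.
 */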
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, &found_key, leaf,
						   dev_item);
				BUG_ON(ret);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}