/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#define _XOPEN_SOURCE 600
#define __USE_XOPEN2K
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <uuid/uuid.h>
#include <fcntl.h>
#include <unistd.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
struct stripe {
        struct btrfs_device *dev;
        u64 physical;
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
                            (sizeof(struct btrfs_bio_stripe) * (n)))
static LIST_HEAD(fs_uuids);
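
/*
 * Walk a device list and return the entry whose devid and uuid both
 * match, or NULL if there is no such device.
 */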
static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
                                          u8 *uuid)
{
        struct btrfs_device *dev;
        struct list_head *cur;

        list_for_each(cur, head) {
                dev = list_entry(cur, struct btrfs_device, dev_list);
                if (dev->devid == devid &&
                    !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)) {
                        return dev;
                }
        }
        return NULL;
}
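
/*
 * Look up an already-scanned filesystem by fsid in the global fs_uuids
 * list, or return NULL if it has not been seen yet.
 */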
static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
        struct list_head *cur;
        struct btrfs_fs_devices *fs_devices;

        list_for_each(cur, &fs_uuids) {
                fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
                if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }
        return NULL;
}
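
/*
 * Record a scanned device in the in-memory device list, creating a new
 * btrfs_fs_devices entry the first time a given fsid is seen.  The
 * latest_* and lowest_devid fields track the newest superblock
 * generation and the smallest devid seen so far.
 */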
static int device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices;
        u64 found_transid = btrfs_super_generation(disk_super);

        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
                fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
                if (!fs_devices)
                        return -ENOMEM;
                INIT_LIST_HEAD(&fs_devices->devices);
                list_add(&fs_devices->list, &fs_uuids);
                memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
                fs_devices->lowest_devid = (u64)-1;
                device = NULL;
        } else {
                device = __find_device(&fs_devices->devices, devid,
                                       disk_super->dev_item.uuid);
        }
        if (!device) {
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device) {
                        /* we can safely leave the fs_devices entry around */
                        return -ENOMEM;
                }
                device->devid = devid;
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
                device->name = kstrdup(path, GFP_NOFS);
                if (!device->name) {
                        kfree(device);
                        return -ENOMEM;
                }
                device->label = kstrdup(disk_super->label, GFP_NOFS);
                device->total_devs = btrfs_super_num_devices(disk_super);
                device->super_bytes_used = btrfs_super_bytes_used(disk_super);
                device->total_bytes =
                        btrfs_stack_device_total_bytes(&disk_super->dev_item);
                device->bytes_used =
                        btrfs_stack_device_bytes_used(&disk_super->dev_item);
                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
        } else if (!device->name || strcmp(device->name, path)) {
                char *name = strdup(path);
                if (!name)
                        return -ENOMEM;
                kfree(device->name);
                device->name = name;
        }

        if (found_transid > fs_devices->latest_trans) {
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
        }
        if (fs_devices->lowest_devid > devid) {
                fs_devices->lowest_devid = devid;
        }
        *fs_devices_ret = fs_devices;
        return 0;
}
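
/*
 * Close the file descriptor of every device in this filesystem,
 * including any seed filesystems chained through fs_devices->seed.
 */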
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_fs_devices *seed_devices;
        struct list_head *cur;
        struct btrfs_device *device;
again:
        list_for_each(cur, &fs_devices->devices) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                close(device->fd);
                device->fd = -1;
                device->writeable = 0;
        }

        seed_devices = fs_devices->seed;
        fs_devices->seed = NULL;
        if (seed_devices) {
                fs_devices = seed_devices;
                goto again;
        }

        return 0;
}
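
/*
 * Open every device with the given flags.  latest_bdev and lowest_bdev
 * cache the fds of the devices with the newest generation and the
 * smallest devid; on any failure everything opened so far is closed.
 */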
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int flags)
{
        int fd;
        struct list_head *head = &fs_devices->devices;
        struct list_head *cur;
        struct btrfs_device *device;
        int ret;

        list_for_each(cur, head) {
                device = list_entry(cur, struct btrfs_device, dev_list);

                fd = open(device->name, flags);
                if (fd < 0) {
                        ret = -errno;
                        goto fail;
                }

                if (device->devid == fs_devices->latest_devid)
                        fs_devices->latest_bdev = fd;
                if (device->devid == fs_devices->lowest_devid)
                        fs_devices->lowest_bdev = fd;
                device->fd = fd;
                if (flags == O_RDWR)
                        device->writeable = 1;
        }
        return 0;
fail:
        btrfs_close_devices(fs_devices);
        return ret;
}
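
/*
 * Read the superblock at super_offset from an open fd and register the
 * device in the scanned-device list via device_list_add().
 */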
int btrfs_scan_one_device(int fd, const char *path,
                          struct btrfs_fs_devices **fs_devices_ret,
                          u64 *total_devs, u64 super_offset)
{
        struct btrfs_super_block *disk_super;
        char *buf;
        int ret;
        u64 devid;
        char uuidbuf[37];

        buf = malloc(4096);
        if (!buf) {
                ret = -ENOMEM;
                goto error;
        }
        disk_super = (struct btrfs_super_block *)buf;
        ret = btrfs_read_dev_super(fd, disk_super, super_offset);
        if (ret < 0) {
                ret = -EIO;
                goto error_brelse;
        }
        devid = le64_to_cpu(disk_super->dev_item.devid);
        if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_METADUMP)
                *total_devs = 1;
        else
                *total_devs = btrfs_super_num_devices(disk_super);
        uuid_unparse(disk_super->fsid, uuidbuf);

        ret = device_list_add(path, disk_super, devid, fs_devices_ret);

error_brelse:
        free(buf);
error:
        return ret;
}
/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_device *device,
                                struct btrfs_path *path,
                                u64 num_bytes, u64 *start)
{
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent = NULL;
        u64 hole_size = 0;
        u64 last_byte = 0;
        u64 search_start = 0;
        u64 search_end = device->total_bytes;
        int ret;
        int slot = 0;
        int start_found;
        struct extent_buffer *l;

        start_found = 0;
        path->reada = 2;

        /* FIXME use last free of some kind */

        /* we don't want to overwrite the superblock on the drive,
         * so we make sure to start at an offset of at least 1MB
         */
        search_start = max((u64)1024 * 1024, search_start);

        if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
                search_start = max(root->fs_info->alloc_start, search_start);

        key.objectid = device->devid;
        key.offset = search_start;
        key.type = BTRFS_DEV_EXTENT_KEY;
        ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;
        ret = btrfs_previous_item(root, path, 0, key.type);
        if (ret < 0)
                goto error;
        l = path->nodes[0];
        btrfs_item_key_to_cpu(l, &key, path->slots[0]);
        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
no_more_items:
                        if (!start_found) {
                                if (search_start >= search_end) {
                                        ret = -ENOSPC;
                                        goto error;
                                }
                                *start = search_start;
                                start_found = 1;
                                goto check_pending;
                        }
                        *start = last_byte > search_start ?
                                last_byte : search_start;
                        if (search_end <= *start) {
                                ret = -ENOSPC;
                                goto error;
                        }
                        goto check_pending;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;

                if (key.objectid > device->devid)
                        goto no_more_items;

                if (key.offset >= search_start && key.offset > last_byte &&
                    start_found) {
                        if (last_byte < search_start)
                                last_byte = search_start;
                        hole_size = key.offset - last_byte;
                        if (key.offset > last_byte &&
                            hole_size >= num_bytes) {
                                *start = last_byte;
                                goto check_pending;
                        }
                }
                if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
                        goto next;
                }

                start_found = 1;
                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
                path->slots[0]++;
        }
check_pending:
        /* we have to make sure we didn't find an extent that has already
         * been allocated by the map tree or the original allocation
         */
        btrfs_release_path(root, path);
        BUG_ON(*start < search_start);

        if (*start + num_bytes > search_end) {
                ret = -ENOSPC;
                goto error;
        }
        /* check for pending inserts here */
        return 0;

error:
        btrfs_release_path(root, path);
        return ret;
}
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
                           struct btrfs_device *device,
                           u64 chunk_tree, u64 chunk_objectid,
                           u64 chunk_offset,
                           u64 num_bytes, u64 *start)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *extent;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = find_free_dev_extent(trans, device, path, num_bytes, start);
        if (ret)
                goto err;

        key.objectid = device->devid;
        key.offset = *start;
        key.type = BTRFS_DEV_EXTENT_KEY;
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*extent));
        BUG_ON(ret);

        leaf = path->nodes[0];
        extent = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_dev_extent);
        btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
        btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
        btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

        write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
                    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
                    BTRFS_UUID_SIZE);

        btrfs_set_dev_extent_length(leaf, extent, num_bytes);
        btrfs_mark_buffer_dirty(leaf);
err:
        btrfs_free_path(path);
        return ret;
}
static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct btrfs_chunk *chunk;
        struct btrfs_key found_key;

        path = btrfs_alloc_path();
        BUG_ON(!path);

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0);

        ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
        if (ret) {
                *offset = 0;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                if (found_key.objectid != objectid)
                        *offset = 0;
                else {
                        chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                               struct btrfs_chunk);
                        *offset = found_key.offset +
                                btrfs_chunk_length(path->nodes[0], chunk);
                }
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
                           u64 *objectid)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_key found_key;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = (u64)-1;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0);

        ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
                                  BTRFS_DEV_ITEM_KEY);
        if (ret) {
                *objectid = 1;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                *objectid = found_key.offset + 1;
        }
        ret = 0;
error:
        btrfs_release_path(root, path);
        return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
                     struct btrfs_root *root,
                     struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        unsigned long ptr;
        u64 free_devid = 0;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = find_next_devid(root, path, &free_devid);
        if (ret)
                goto out;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = free_devid;

        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*dev_item));
        if (ret)
                goto out;

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        device->devid = free_devid;
        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_generation(leaf, dev_item, 0);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_set_device_group(leaf, dev_item, 0);
        btrfs_set_device_seek_speed(leaf, dev_item, 0);
        btrfs_set_device_bandwidth(leaf, dev_item, 0);
        btrfs_set_device_start_offset(leaf, dev_item, 0);

        ptr = (unsigned long)btrfs_device_uuid(dev_item);
        write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
        ptr = (unsigned long)btrfs_device_fsid(dev_item);
        write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;

out:
        btrfs_free_path(path);
        return ret;
}
int btrfs_update_device(struct btrfs_trans_handle *trans,
                        struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        root = device->dev_root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_mark_buffer_dirty(leaf);

out:
        btrfs_free_path(path);
        return ret;
}
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_key *key,
                           struct btrfs_chunk *chunk, int item_size)
{
        struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
        struct btrfs_disk_key disk_key;
        u32 array_size;
        u8 *ptr;

        array_size = btrfs_super_sys_array_size(super_copy);
        if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
                return -EFBIG;

        ptr = super_copy->sys_chunk_array + array_size;
        btrfs_cpu_key_to_disk(&disk_key, key);
        memcpy(ptr, &disk_key, sizeof(disk_key));
        ptr += sizeof(disk_key);
        memcpy(ptr, chunk, item_size);
        item_size += sizeof(disk_key);
        btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
        return 0;
}
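
/*
 * Return (num * factor) / 10, so div_factor(n, 1) is 10% of n and a
 * factor of 10 returns n unchanged.
 */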
static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        num /= 10;
        return num;
}
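
/*
 * Logical bytes provided by a chunk built from num_stripes stripes of
 * calc_size each.  A worked example (not from the original source): a
 * RAID10 chunk with num_stripes = 4, sub_stripes = 2 and a 1GB
 * calc_size carries 4 / 2 * 1GB = 2GB of logical space, while
 * RAID1/DUP chunks always carry exactly calc_size.
 */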
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
                               int sub_stripes)
{
        if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
                return calc_size;
        else if (type & BTRFS_BLOCK_GROUP_RAID10)
                return calc_size * (num_stripes / sub_stripes);
        else
                return calc_size * num_stripes;
}
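
/*
 * Pick a stripe size for the requested block group type, gather enough
 * devices with free space, allocate one dev extent per stripe, and
 * insert the resulting chunk item plus its map_lookup into the mapping
 * tree.
 */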
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                      struct btrfs_root *extent_root, u64 *start,
                      u64 *num_bytes, u64 type)
{
        u64 dev_offset;
        struct btrfs_fs_info *info = extent_root->fs_info;
        struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
        struct btrfs_stripe *stripes;
        struct btrfs_device *device = NULL;
        struct btrfs_chunk *chunk;
        struct list_head private_devs;
        struct list_head *dev_list = &extent_root->fs_info->fs_devices->devices;
        struct list_head *cur;
        struct map_lookup *map;
        int min_stripe_size = 1 * 1024 * 1024;
        u64 calc_size = 8 * 1024 * 1024;
        u64 min_free;
        u64 max_chunk_size = 4 * calc_size;
        u64 avail;
        u64 max_avail = 0;
        u64 percent_max;
        int num_stripes = 1;
        int min_stripes = 1;
        int sub_stripes = 0;
        int looped = 0;
        int ret;
        int index;
        int stripe_len = 64 * 1024;
        struct btrfs_key key;

        if (list_empty(dev_list)) {
                return -ENOSPC;
        }

        if (type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
                    BTRFS_BLOCK_GROUP_RAID10 |
                    BTRFS_BLOCK_GROUP_DUP)) {
                if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
                        calc_size = 8 * 1024 * 1024;
                        max_chunk_size = calc_size * 2;
                        min_stripe_size = 1 * 1024 * 1024;
                } else if (type & BTRFS_BLOCK_GROUP_DATA) {
                        calc_size = 1024 * 1024 * 1024;
                        max_chunk_size = 10 * calc_size;
                        min_stripe_size = 64 * 1024 * 1024;
                } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
                        calc_size = 1024 * 1024 * 1024;
                        max_chunk_size = 4 * calc_size;
                        min_stripe_size = 32 * 1024 * 1024;
                }
        }
        if (type & BTRFS_BLOCK_GROUP_RAID1) {
                num_stripes = min_t(u64, 2,
                                  btrfs_super_num_devices(&info->super_copy));
                if (num_stripes < 2)
                        return -ENOSPC;
                min_stripes = 2;
        }
        if (type & BTRFS_BLOCK_GROUP_DUP) {
                num_stripes = 2;
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
                num_stripes = btrfs_super_num_devices(&info->super_copy);
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
                num_stripes = btrfs_super_num_devices(&info->super_copy);
                if (num_stripes < 4)
                        return -ENOSPC;
                num_stripes &= ~(u32)1;
                sub_stripes = 2;
                min_stripes = 4;
        }

        /* we don't want a chunk larger than 10% of the FS */
        percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
        max_chunk_size = min(percent_max, max_chunk_size);

again:
        if (chunk_bytes_by_type(type, calc_size, num_stripes, sub_stripes) >
            max_chunk_size) {
                calc_size = max_chunk_size;
                calc_size /= num_stripes;
                calc_size /= stripe_len;
                calc_size *= stripe_len;
        }
        /* we don't want tiny stripes */
        calc_size = max_t(u64, calc_size, min_stripe_size);

        calc_size /= stripe_len;
        calc_size *= stripe_len;
        INIT_LIST_HEAD(&private_devs);
        cur = dev_list->next;
        index = 0;

        if (type & BTRFS_BLOCK_GROUP_DUP)
                min_free = calc_size * 2;
        else
                min_free = calc_size;

        /* build a private list of devices we will allocate from */
        while (index < num_stripes) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                avail = device->total_bytes - device->bytes_used;
                cur = cur->next;
                if (avail >= min_free) {
                        list_move_tail(&device->dev_list, &private_devs);
                        index++;
                        if (type & BTRFS_BLOCK_GROUP_DUP)
                                index++;
                } else if (avail > max_avail)
                        max_avail = avail;
                if (cur == dev_list)
                        break;
        }
        if (index < num_stripes) {
                list_splice(&private_devs, dev_list);
                if (index >= min_stripes) {
                        num_stripes = index;
                        if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
                                num_stripes /= sub_stripes;
                                num_stripes *= sub_stripes;
                        }
                        looped = 1;
                        goto again;
                }
                if (!looped && max_avail > 0) {
                        looped = 1;
                        calc_size = max_avail;
                        goto again;
                }
                return -ENOSPC;
        }
        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.type = BTRFS_CHUNK_ITEM_KEY;
        ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
                              &key.offset);
        if (ret)
                return ret;

        chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
        if (!chunk)
                return -ENOMEM;

        map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
        if (!map) {
                kfree(chunk);
                return -ENOMEM;
        }

        stripes = &chunk->stripe;
        *num_bytes = chunk_bytes_by_type(type, calc_size,
                                         num_stripes, sub_stripes);
        index = 0;
        while (index < num_stripes) {
                struct btrfs_stripe *stripe;
                BUG_ON(list_empty(&private_devs));
                cur = private_devs.next;
                device = list_entry(cur, struct btrfs_device, dev_list);

                /* loop over this device again if we're doing a dup group */
                if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
                    (index == num_stripes - 1))
                        list_move_tail(&device->dev_list, dev_list);

                ret = btrfs_alloc_dev_extent(trans, device,
                             info->chunk_root->root_key.objectid,
                             BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
                             calc_size, &dev_offset);
                BUG_ON(ret);

                device->bytes_used += calc_size;
                ret = btrfs_update_device(trans, device);
                BUG_ON(ret);

                map->stripes[index].dev = device;
                map->stripes[index].physical = dev_offset;
                stripe = stripes + index;
                btrfs_set_stack_stripe_devid(stripe, device->devid);
                btrfs_set_stack_stripe_offset(stripe, dev_offset);
                memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
                index++;
        }
        BUG_ON(!list_empty(&private_devs));

        /* key was set above */
        btrfs_set_stack_chunk_length(chunk, *num_bytes);
        btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
        btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
        btrfs_set_stack_chunk_type(chunk, type);
        btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
        btrfs_set_stack_chunk_io_align(chunk, stripe_len);
        btrfs_set_stack_chunk_io_width(chunk, stripe_len);
        btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
        btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
        map->sector_size = extent_root->sectorsize;
        map->stripe_len = stripe_len;
        map->io_align = stripe_len;
        map->io_width = stripe_len;
        map->type = type;
        map->num_stripes = num_stripes;
        map->sub_stripes = sub_stripes;

        ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
                                btrfs_chunk_item_size(num_stripes));
        BUG_ON(ret);
        *start = key.offset;

        map->ce.start = key.offset;
        map->ce.size = *num_bytes;

        ret = insert_existing_cache_extent(
                           &extent_root->fs_info->mapping_tree.cache_tree,
                           &map->ce);
        BUG_ON(ret);

        if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
                ret = btrfs_add_system_chunk(trans, chunk_root, &key,
                                    chunk, btrfs_chunk_item_size(num_stripes));
                BUG_ON(ret);
        }

        kfree(chunk);
        return ret;
}
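
/*
 * Simplified variant of btrfs_alloc_chunk() that builds a single-stripe
 * chunk of exactly num_bytes on the first device in the list.
 */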
int btrfs_alloc_data_chunk(struct btrfs_trans_handle *trans,
                           struct btrfs_root *extent_root, u64 *start,
                           u64 num_bytes, u64 type)
{
        u64 dev_offset;
        struct btrfs_fs_info *info = extent_root->fs_info;
        struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
        struct btrfs_stripe *stripes;
        struct btrfs_device *device = NULL;
        struct btrfs_chunk *chunk;
        struct list_head *dev_list = &extent_root->fs_info->fs_devices->devices;
        struct list_head *cur;
        struct map_lookup *map;
        u64 calc_size = 8 * 1024 * 1024;
        int num_stripes = 1;
        int sub_stripes = 0;
        int ret;
        int index;
        int stripe_len = 64 * 1024;
        struct btrfs_key key;

        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.type = BTRFS_CHUNK_ITEM_KEY;
        ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
                              &key.offset);
        if (ret)
                return ret;

        chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
        if (!chunk)
                return -ENOMEM;

        map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
        if (!map) {
                kfree(chunk);
                return -ENOMEM;
        }

        stripes = &chunk->stripe;
        calc_size = num_bytes;

        index = 0;
        cur = dev_list->next;
        device = list_entry(cur, struct btrfs_device, dev_list);

        while (index < num_stripes) {
                struct btrfs_stripe *stripe;

                ret = btrfs_alloc_dev_extent(trans, device,
                             info->chunk_root->root_key.objectid,
                             BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
                             calc_size, &dev_offset);
                BUG_ON(ret);

                device->bytes_used += calc_size;
                ret = btrfs_update_device(trans, device);
                BUG_ON(ret);

                map->stripes[index].dev = device;
                map->stripes[index].physical = dev_offset;
                stripe = stripes + index;
                btrfs_set_stack_stripe_devid(stripe, device->devid);
                btrfs_set_stack_stripe_offset(stripe, dev_offset);
                memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
                index++;
        }

        /* key was set above */
        btrfs_set_stack_chunk_length(chunk, num_bytes);
        btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
        btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
        btrfs_set_stack_chunk_type(chunk, type);
        btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
        btrfs_set_stack_chunk_io_align(chunk, stripe_len);
        btrfs_set_stack_chunk_io_width(chunk, stripe_len);
        btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
        btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
        map->sector_size = extent_root->sectorsize;
        map->stripe_len = stripe_len;
        map->io_align = stripe_len;
        map->io_width = stripe_len;
        map->type = type;
        map->num_stripes = num_stripes;
        map->sub_stripes = sub_stripes;

        ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
                                btrfs_chunk_item_size(num_stripes));
        BUG_ON(ret);
        *start = key.offset;

        map->ce.start = key.offset;
        map->ce.size = num_bytes;

        ret = insert_existing_cache_extent(
                           &extent_root->fs_info->mapping_tree.cache_tree,
                           &map->ce);
        BUG_ON(ret);

        kfree(chunk);
        return ret;
}
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
        cache_tree_init(&tree->cache_tree);
}
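
/*
 * Number of copies that exist for the chunk containing the given
 * logical address: num_stripes for DUP/RAID1, sub_stripes for RAID10,
 * otherwise 1.
 */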
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
        struct cache_extent *ce;
        struct map_lookup *map;
        int ret;

        ce = find_first_cache_extent(&map_tree->cache_tree, logical);
        BUG_ON(!ce);
        BUG_ON(ce->start > logical || ce->start + ce->size < logical);
        map = container_of(ce, struct map_lookup, ce);

        if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
                ret = map->num_stripes;
        else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
                ret = map->sub_stripes;
        else
                ret = 1;
        return ret;
}
int btrfs_next_metadata(struct btrfs_mapping_tree *map_tree, u64 *logical,
                        u64 *size)
{
        struct cache_extent *ce;
        struct map_lookup *map;

        ce = find_first_cache_extent(&map_tree->cache_tree, *logical);
        while (ce) {
                ce = next_cache_extent(ce);
                if (!ce)
                        return -ENOENT;

                map = container_of(ce, struct map_lookup, ce);
                if (map->type & BTRFS_BLOCK_GROUP_METADATA) {
                        *logical = ce->start;
                        *size = ce->size;
                        return 0;
                }
        }
        return -ENOENT;
}
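
/*
 * Reverse-map a physical offset on one device back to the logical
 * addresses that reference it, deduplicating mirrors that resolve to
 * the same logical block.
 */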
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
                     u64 chunk_start, u64 physical, u64 devid,
                     u64 **logical, int *naddrs, int *stripe_len)
{
        struct cache_extent *ce;
        struct map_lookup *map;
        u64 *buf;
        u64 bytenr;
        u64 length;
        u64 stripe_nr;
        int i, j, nr = 0;

        ce = find_first_cache_extent(&map_tree->cache_tree, chunk_start);
        BUG_ON(!ce);
        map = container_of(ce, struct map_lookup, ce);

        length = ce->size;
        if (map->type & BTRFS_BLOCK_GROUP_RAID10)
                length = ce->size / (map->num_stripes / map->sub_stripes);
        else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
                length = ce->size / map->num_stripes;

        buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);

        for (i = 0; i < map->num_stripes; i++) {
                if (devid && map->stripes[i].dev->devid != devid)
                        continue;
                if (map->stripes[i].physical > physical ||
                    map->stripes[i].physical + length <= physical)
                        continue;

                stripe_nr = (physical - map->stripes[i].physical) /
                            map->stripe_len;

                if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
                        stripe_nr = (stripe_nr * map->num_stripes + i) /
                                    map->sub_stripes;
                } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
                        stripe_nr = stripe_nr * map->num_stripes + i;
                }
                bytenr = ce->start + stripe_nr * map->stripe_len;
                for (j = 0; j < nr; j++) {
                        if (buf[j] == bytenr)
                                break;
                }
                if (j == nr)
                        buf[nr++] = bytenr;
        }

        *logical = buf;
        *naddrs = nr;
        *stripe_len = map->stripe_len;

        return 0;
}
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
                    u64 logical, u64 *length,
                    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
        return __btrfs_map_block(map_tree, rw, logical, length, NULL,
                                 multi_ret, mirror_num);
}
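
/*
 * A minimal usage sketch (not from the original source; error handling
 * omitted) for mapping a logical range to its physical stripes:
 *
 *      struct btrfs_multi_bio *multi = NULL;
 *      u64 length = len;
 *
 *      ret = btrfs_map_block(map_tree, READ, logical, &length, &multi, 0);
 *      // multi->num_stripes pairs of (dev, physical) now describe the
 *      // copies; length is clamped to the stripe boundary
 *      kfree(multi);
 */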
int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
                      u64 logical, u64 *length, u64 *type,
                      struct btrfs_multi_bio **multi_ret, int mirror_num)
{
        struct cache_extent *ce;
        struct map_lookup *map;
        u64 offset;
        u64 stripe_offset;
        u64 stripe_nr;
        int stripes_allocated = 8;
        int stripes_required = 1;
        int stripe_index;
        int i;
        struct btrfs_multi_bio *multi = NULL;

        if (multi_ret && rw == READ) {
                stripes_allocated = 1;
        }
again:
        ce = find_first_cache_extent(&map_tree->cache_tree, logical);
        if (!ce) {
                kfree(multi);
                return -ENOENT;
        }
        if (ce->start > logical || ce->start + ce->size < logical) {
                kfree(multi);
                return -ENOENT;
        }

        if (multi_ret) {
                multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
                                GFP_NOFS);
                if (!multi)
                        return -ENOMEM;
        }
        map = container_of(ce, struct map_lookup, ce);
        offset = logical - ce->start;

        if (rw == WRITE) {
                if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
                                 BTRFS_BLOCK_GROUP_DUP)) {
                        stripes_required = map->num_stripes;
                } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
                        stripes_required = map->sub_stripes;
                }
        }
        /* if our multi bio struct is too small, back off and try again */
        if (multi_ret && rw == WRITE &&
            stripes_allocated < stripes_required) {
                stripes_allocated = map->num_stripes;
                kfree(multi);
                goto again;
        }
        stripe_nr = offset;
        /*
         * stripe_nr counts the total number of stripes we have to stride
         * to get to this block
         */
        stripe_nr = stripe_nr / map->stripe_len;

        stripe_offset = stripe_nr * map->stripe_len;
        BUG_ON(offset < stripe_offset);

        /* stripe_offset is the offset of this block in its stripe */
        stripe_offset = offset - stripe_offset;

        if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
                         BTRFS_BLOCK_GROUP_RAID10 |
                         BTRFS_BLOCK_GROUP_DUP)) {
                /* we limit the length of each bio to what fits in a stripe */
                *length = min_t(u64, ce->size - offset,
                                map->stripe_len - stripe_offset);
        } else {
                *length = ce->size - offset;
        }

        if (!multi_ret)
                goto out;

        multi->num_stripes = 1;
        stripe_index = 0;
        if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
                if (rw == WRITE)
                        multi->num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;
                else
                        stripe_index = stripe_nr % map->num_stripes;
        } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
                int factor = map->num_stripes / map->sub_stripes;

                stripe_index = stripe_nr % factor;
                stripe_index *= map->sub_stripes;

                if (rw == WRITE)
                        multi->num_stripes = map->sub_stripes;
                else if (mirror_num)
                        stripe_index += mirror_num - 1;

                stripe_nr = stripe_nr / factor;
        } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
                if (rw == WRITE)
                        multi->num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;
        } else {
                /*
                 * after this do_div call, stripe_nr is the number of stripes
                 * on this device we have to walk to find the data, and
                 * stripe_index is the number of our device in the stripe array
                 */
                stripe_index = stripe_nr % map->num_stripes;
                stripe_nr = stripe_nr / map->num_stripes;
        }
        BUG_ON(stripe_index >= map->num_stripes);

        for (i = 0; i < multi->num_stripes; i++) {
                multi->stripes[i].physical =
                        map->stripes[stripe_index].physical + stripe_offset +
                        stripe_nr * map->stripe_len;
                multi->stripes[i].dev = map->stripes[stripe_index].dev;
                stripe_index++;
        }
        *multi_ret = multi;
        if (type)
                *type = map->type;
out:
        return 0;
}
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
                                       u8 *uuid, u8 *fsid)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *cur_devices;

        cur_devices = root->fs_info->fs_devices;
        while (cur_devices) {
                if (!fsid ||
                    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
                        device = __find_device(&cur_devices->devices,
                                               devid, uuid);
                        if (device)
                                return device;
                }
                cur_devices = cur_devices->seed;
        }
        return NULL;
}
int btrfs_bootstrap_super_map(struct btrfs_mapping_tree *map_tree,
                              struct btrfs_fs_devices *fs_devices)
{
        struct map_lookup *map;
        u64 logical = BTRFS_SUPER_INFO_OFFSET;
        u64 length = BTRFS_SUPER_INFO_SIZE;
        int num_stripes = 0;
        int sub_stripes = 0;
        int ret;
        int i;
        struct list_head *cur;

        list_for_each(cur, &fs_devices->devices) {
                num_stripes++;
        }
        map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
        if (!map)
                return -ENOMEM;

        map->ce.start = logical;
        map->ce.size = length;
        map->num_stripes = num_stripes;
        map->sub_stripes = sub_stripes;
        map->io_width = length;
        map->io_align = length;
        map->sector_size = length;
        map->stripe_len = length;
        map->type = BTRFS_BLOCK_GROUP_RAID1;

        i = 0;
        list_for_each(cur, &fs_devices->devices) {
                struct btrfs_device *device = list_entry(cur,
                                                         struct btrfs_device,
                                                         dev_list);
                map->stripes[i].physical = logical;
                map->stripes[i].dev = device;
                i++;
        }
        ret = insert_existing_cache_extent(&map_tree->cache_tree, &map->ce);
        if (ret == -EEXIST) {
                struct cache_extent *old;
                struct map_lookup *old_map;
                old = find_cache_extent(&map_tree->cache_tree, logical, length);
                old_map = container_of(old, struct map_lookup, ce);
                remove_cache_extent(&map_tree->cache_tree, old);
                kfree(old_map);
                ret = insert_existing_cache_extent(&map_tree->cache_tree,
                                                   &map->ce);
        }
        BUG_ON(ret);
        return 0;
}
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
        struct cache_extent *ce;
        struct map_lookup *map;
        struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
        int readonly = 0;
        int i;

        ce = find_first_cache_extent(&map_tree->cache_tree, chunk_offset);
        BUG_ON(!ce);

        map = container_of(ce, struct map_lookup, ce);
        for (i = 0; i < map->num_stripes; i++) {
                if (!map->stripes[i].dev->writeable) {
                        readonly = 1;
                        break;
                }
        }

        return readonly;
}
static struct btrfs_device *fill_missing_device(u64 devid)
{
        struct btrfs_device *device;

        device = kzalloc(sizeof(*device), GFP_NOFS);
        device->devid = devid;
        device->fd = -1;
        return device;
}
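
/*
 * Turn an on-disk chunk item into a map_lookup and insert it into the
 * mapping tree, filling in a placeholder btrfs_device for any stripe
 * whose device was not found by the scan.
 */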
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                          struct extent_buffer *leaf,
                          struct btrfs_chunk *chunk)
{
        struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
        struct map_lookup *map;
        struct cache_extent *ce;
        u64 logical;
        u64 length;
        u64 devid;
        u8 uuid[BTRFS_UUID_SIZE];
        int num_stripes;
        int ret;
        int i;

        logical = key->offset;
        length = btrfs_chunk_length(leaf, chunk);

        ce = find_first_cache_extent(&map_tree->cache_tree, logical);

        /* already mapped? */
        if (ce && ce->start <= logical && ce->start + ce->size > logical) {
                return 0;
        }

        num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
        map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
        if (!map)
                return -ENOMEM;

        map->ce.start = logical;
        map->ce.size = length;
        map->num_stripes = num_stripes;
        map->io_width = btrfs_chunk_io_width(leaf, chunk);
        map->io_align = btrfs_chunk_io_align(leaf, chunk);
        map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
        map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
        map->type = btrfs_chunk_type(leaf, chunk);
        map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);

        for (i = 0; i < num_stripes; i++) {
                map->stripes[i].physical =
                        btrfs_stripe_offset_nr(leaf, chunk, i);
                devid = btrfs_stripe_devid_nr(leaf, chunk, i);
                read_extent_buffer(leaf, uuid, (unsigned long)
                                   btrfs_stripe_dev_uuid_nr(chunk, i),
                                   BTRFS_UUID_SIZE);
                map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
                                                        NULL);
                if (!map->stripes[i].dev) {
                        map->stripes[i].dev = fill_missing_device(devid);
                        printf("warning, device %llu is missing\n",
                               (unsigned long long)devid);
                }
        }
        ret = insert_existing_cache_extent(&map_tree->cache_tree, &map->ce);
        BUG_ON(ret);

        return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
                                 struct btrfs_dev_item *dev_item,
                                 struct btrfs_device *device)
{
        unsigned long ptr;

        device->devid = btrfs_device_id(leaf, dev_item);
        device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
        device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
        device->type = btrfs_device_type(leaf, dev_item);
        device->io_align = btrfs_device_io_align(leaf, dev_item);
        device->io_width = btrfs_device_io_width(leaf, dev_item);
        device->sector_size = btrfs_device_sector_size(leaf, dev_item);

        ptr = (unsigned long)btrfs_device_uuid(dev_item);
        read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

        return 0;
}
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
        struct btrfs_fs_devices *fs_devices;
        int ret;

        fs_devices = root->fs_info->fs_devices->seed;
        while (fs_devices) {
                if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
                        ret = 0;
                        goto out;
                }
                fs_devices = fs_devices->seed;
        }

        fs_devices = find_fsid(fsid);
        if (!fs_devices) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_open_devices(fs_devices, O_RDONLY);
        if (ret)
                goto out;

        fs_devices->seed = root->fs_info->fs_devices->seed;
        root->fs_info->fs_devices->seed = fs_devices;
out:
        return ret;
}
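
/*
 * Match a dev item from the chunk tree against the scanned devices,
 * opening seed devices first if the item belongs to a foreign fsid,
 * then fill in the btrfs_device from the item.
 */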
static int read_one_dev(struct btrfs_root *root,
                        struct extent_buffer *leaf,
                        struct btrfs_dev_item *dev_item)
{
        struct btrfs_device *device;
        u64 devid;
        int ret = 0;
        u8 fs_uuid[BTRFS_UUID_SIZE];
        u8 dev_uuid[BTRFS_UUID_SIZE];

        devid = btrfs_device_id(leaf, dev_item);
        read_extent_buffer(leaf, dev_uuid,
                           (unsigned long)btrfs_device_uuid(dev_item),
                           BTRFS_UUID_SIZE);
        read_extent_buffer(leaf, fs_uuid,
                           (unsigned long)btrfs_device_fsid(dev_item),
                           BTRFS_UUID_SIZE);

        if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
                ret = open_seed_devices(root, fs_uuid);
                if (ret)
                        return ret;
        }

        device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
        if (!device) {
                printk("warning: devid %llu not found\n",
                       (unsigned long long)devid);
                device = kmalloc(sizeof(*device), GFP_NOFS);
                if (!device)
                        return -ENOMEM;
                device->total_ios = 0;
                list_add(&device->dev_list,
                         &root->fs_info->fs_devices->devices);
        }

        fill_device_from_item(leaf, dev_item, device);
        device->dev_root = root->fs_info->dev_root;
        return ret;
}
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
        struct btrfs_dev_item *dev_item;

        dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
                                                     dev_item);
        return read_one_dev(root, buf, dev_item);
}
int btrfs_read_sys_array(struct btrfs_root *root)
{
        struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
        struct extent_buffer *sb;
        struct btrfs_disk_key *disk_key;
        struct btrfs_chunk *chunk;
        struct btrfs_key key;
        u32 num_stripes;
        u32 array_size;
        u32 len = 0;
        u8 *ptr;
        unsigned long sb_ptr;
        u32 cur;
        int ret = 0;

        sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
                                          BTRFS_SUPER_INFO_SIZE);
        if (!sb)
                return -ENOMEM;
        btrfs_set_buffer_uptodate(sb);
        write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
        array_size = btrfs_super_sys_array_size(super_copy);

        /*
         * we do this loop twice, once for the device items and
         * once for all of the chunks.  This way there are device
         * structs filled in for every chunk
         */
        ptr = super_copy->sys_chunk_array;
        sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
        cur = 0;

        while (cur < array_size) {
                disk_key = (struct btrfs_disk_key *)ptr;
                btrfs_disk_key_to_cpu(&key, disk_key);

                len = sizeof(*disk_key);
                ptr += len;
                sb_ptr += len;
                cur += len;

                if (key.type == BTRFS_CHUNK_ITEM_KEY) {
                        chunk = (struct btrfs_chunk *)sb_ptr;
                        ret = read_one_chunk(root, &key, sb, chunk);
                        if (ret)
                                break;
                        num_stripes = btrfs_chunk_num_stripes(sb, chunk);
                        len = btrfs_chunk_item_size(num_stripes);
                } else {
                        BUG();
                }
                ptr += len;
                sb_ptr += len;
                cur += len;
        }
        free_extent_buffer(sb);
        return ret;
}
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        int ret;
        int slot;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /* first we search for all of the device items, and then we
         * read in all of the chunk items.  This way we can create chunk
         * mappings that reference all of the devices that are found
         */
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.offset = 0;
        key.type = 0;
again:
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        while (1) {
                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
                        break;
                }
                btrfs_item_key_to_cpu(leaf, &found_key, slot);
                if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
                        if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
                                break;
                        if (found_key.type == BTRFS_DEV_ITEM_KEY) {
                                struct btrfs_dev_item *dev_item;
                                dev_item = btrfs_item_ptr(leaf, slot,
                                                  struct btrfs_dev_item);
                                ret = read_one_dev(root, leaf, dev_item);
                                BUG_ON(ret);
                        }
                } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
                        struct btrfs_chunk *chunk;
                        chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
                        ret = read_one_chunk(root, &found_key, leaf, chunk);
                        BUG_ON(ret);
                }
                path->slots[0]++;
        }
        if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
                key.objectid = 0;
                btrfs_release_path(root, path);
                goto again;
        }

        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}

struct list_head *btrfs_scanned_uuids(void)
{
        return &fs_uuids;
}