/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))
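/*
 * map_lookup ends in a flexible array member, so a lookup for n stripes
 * occupies sizeof(struct map_lookup) plus n btrfs_bio_stripe entries in
 * one contiguous allocation, e.g. map_lookup_size(4) for a four-stripe
 * RAID0 chunk.
 */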
static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;
	struct list_head *uuid_cur;
	struct list_head *devices_cur;
	struct btrfs_device *dev;

	list_for_each(uuid_cur, &fs_uuids) {
		fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
					list);
		while(!list_empty(&fs_devices->devices)) {
			devices_cur = fs_devices->devices.next;
			dev = list_entry(devices_cur, struct btrfs_device,
					 dev_list);
			if (dev->bdev) {
				close_bdev_excl(dev->bdev);
			}
			list_del(&dev->dev_list);
			kfree(dev);
		}
	}
	return 0;
}
static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kmalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		fs_devices->num_devices = 0;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		list_add(&device->dev_list, &fs_devices->devices);
		list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
		fs_devices->num_devices++;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
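/*
 * Every scanned device funnels through device_list_add(): superblocks
 * sharing one fsid share a single btrfs_fs_devices, and the copy with
 * the highest generation decides which devid becomes latest_devid.
 */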
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev) {
			close_bdev_excl(device->bdev);
		}
		device->bdev = NULL;
	}
	mutex_unlock(&uuid_mutex);
	return 0;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       int flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	int ret;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		bdev = open_bdev_excl(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk("open %s failed\n", device->name);
			ret = PTR_ERR(bdev);
			goto fail;
		}
		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = bdev;
		if (device->devid == fs_devices->lowest_devid) {
			fs_devices->lowest_bdev = bdev;
		}
		device->bdev = bdev;
	}
	mutex_unlock(&uuid_mutex);
	return 0;
fail:
	mutex_unlock(&uuid_mutex);
	btrfs_close_devices(fs_devices);
	return ret;
}
int btrfs_scan_one_device(const char *path, int flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_excl(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		ret = -EINVAL;
		goto error_brelse;
	}
	devid = le64_to_cpu(disk_super->dev_item.devid);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk("device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk("device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk("devid %Lu transid %Lu %s\n", devid, transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

error_brelse:
	brelse(bh);
error_close:
	close_bdev_excl(bdev);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;
	path->reada = 2;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
			goto next;
		}

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}
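/*
 * The search above walks dev extent items in key order and measures
 * each gap as hole_size = key.offset - last_byte; the first hole of at
 * least num_bytes wins, so a device with extents ending at 5MB and
 * resuming at 9MB offers a 4MB hole.
 */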
int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return ret;
}
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset,
			   u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret)
		goto err;

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}
static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}
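/*
 * Device items all live under BTRFS_DEV_ITEMS_OBJECTID with the devid
 * as the key offset, so searching from offset (u64)-1 and stepping back
 * one item lands on the highest devid in use; the next free id is that
 * offset plus one.
 */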
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid = 0;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->total_bytes = new_size;
	return btrfs_update_device(trans, device);
}
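/*
 * Growing only has to bump the superblock total: diff is the number of
 * bytes added, so a device going from 8GB to 10GB adds 2GB to the
 * filesystem-wide total before the device item is rewritten.
 */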
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}
int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
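/*
 * sys_chunk_array is a packed byte array: each entry is a btrfs_disk_key
 * immediately followed by its chunk item, whose size depends on the
 * stripe count.  Deleting an entry just memmoves the tail down over it
 * and shrinks the recorded array size.
 */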
int btrfs_relocate_chunk(struct btrfs_root *root,
			 u64 chunk_tree, u64 chunk_objectid,
			 u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_shrink_extent_tree(extent_root, chunk_offset);
	BUG_ON(ret);

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset || em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	spin_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	kfree(map);
	em->bdev = NULL;
	/* once for the tree */
	free_extent_map(em);
	spin_unlock(&em_tree->lock);

	/* once for us */
	free_extent_map(em);

	btrfs_end_transaction(trans, root);
	return 0;
}
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = device->total_bytes - new_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto done;
	}

	path->reada = 2;

	device->total_bytes = new_size;
	ret = btrfs_update_device(trans, device);
	if (ret) {
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	btrfs_end_transaction(trans, root);

	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			goto done;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid)
			goto done;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size)
			goto done;

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret)
			goto done;
	}

done:
	btrfs_free_path(path);
	return ret;
}
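/*
 * The shrink loop walks dev extents from the end of the key space and
 * relocates any chunk backed by an extent crossing the new size, e.g.
 * shrinking to 10GB relocates every chunk whose dev extent ends past
 * the 10GB mark; an extent entirely below it terminates the loop.
 */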
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
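/*
 * div_factor(num, f) returns num * f / 10, so the div_factor(..., 1)
 * call in btrfs_alloc_chunk below yields the 10% cap on chunk size.
 */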
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}
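/*
 * Logical chunk bytes per physical stripe size: RAID1 and DUP store two
 * copies, so the chunk is only calc_size; RAID10 with four stripes and
 * sub_stripes == 2 yields calc_size * 2; plain RAID0 yields the full
 * calc_size * num_stripes.
 */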
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list;
	struct list_head *cur;
	struct extent_map_tree *em_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 physical;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 max_chunk_size = calc_size;
	u64 min_free;
	u64 avail;
	u64 max_avail = 0;
	u64 percent_max;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	dev_list = &extent_root->fs_info->fs_devices->alloc_list;
	if (list_empty(dev_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2,
				  btrfs_super_num_devices(&info->super_copy));
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 4 * calc_size;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (calc_size * num_stripes > max_chunk_size) {
		calc_size = max_chunk_size;
		do_div(calc_size, num_stripes);
		do_div(calc_size, stripe_len);
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, min_stripe_size, calc_size);

	do_div(calc_size, stripe_len);
	calc_size *= stripe_len;

	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* we add 1MB because we never use the first 1MB of the device */
	min_free += 1024 * 1024;

	/* build a private list of devices we will allocate from */
	while(index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		avail = device->total_bytes - device->bytes_used;
		cur = cur->next;

		if (avail >= min_free) {
			u64 ignored_start = 0;
			ret = find_free_dev_extent(trans, device, path,
						   min_free,
						   &ignored_start);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP)
					index++;
			}
		} else if (avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		btrfs_free_path(path);
		return -ENOSPC;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		btrfs_free_path(path);
		return -ENOMEM;
	}
	btrfs_free_path(path);
	path = NULL;

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);

	index = 0;
	printk("new chunk type %Lu start %Lu size %Lu\n", type, key.offset, *num_bytes);
	while(index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_alloc_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);
		BUG_ON(ret);
		printk("alloc chunk start %Lu size %Lu from dev %Lu type %Lu\n", key.offset, calc_size, device->devid, type);
		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	em->bdev = (struct block_device *)map;
	em->start = key.offset;
	em->len = *num_bytes;
	em->block_start = 0;

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key,
				    chunk, btrfs_chunk_item_size(num_stripes));
		BUG_ON(ret);
	}
	kfree(chunk);

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);
	return ret;
}
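/*
 * Chunk allocation above is: size the stripes, reserve a dev extent on
 * each chosen device, fill the on-disk chunk item and the in-memory
 * map_lookup, insert the chunk item, and finally publish the mapping in
 * the extent map tree (plus the superblock array for SYSTEM chunks).
 */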
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while(1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	spin_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}
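/*
 * Copy counts follow the profile: DUP and RAID1 have num_stripes copies
 * (two), RAID10 has sub_stripes copies per mirror set, and everything
 * else (single, RAID0) has exactly one.
 */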
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_multi_bio **multi_ret,
			     int mirror_num, struct page *unplug_page)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	int num_stripes;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (1 << BIO_RW))) {
		stripes_allocated = 1;
	}
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;
	}

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	spin_unlock(&em_tree->lock);

	if (!em && unplug_page)
		return 0;

	if (!em) {
		printk("unable to find logical %Lu\n", logical);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & (1 << BIO_RW)) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
		}
	}
	if (multi_ret && rw == WRITE &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe*/
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}
	if (!multi_ret && !unplug_page)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->num_stripes;
		else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			u64 orig_stripe_nr = stripe_nr;
			stripe_index = do_div(orig_stripe_nr, num_stripes);
		}
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (1 << BIO_RW))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			u64 orig_stripe_nr = stripe_nr;
			stripe_index += do_div(orig_stripe_nr,
					       map->sub_stripes);
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < num_stripes; i++) {
		if (unplug_page) {
			struct btrfs_device *device;
			struct backing_dev_info *bdi;

			device = map->stripes[stripe_index].dev;
			bdi = blk_get_backing_dev_info(device->bdev);
			if (bdi->unplug_io_fn) {
				bdi->unplug_io_fn(bdi, unplug_page);
			}
		} else {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;
		}
		stripe_index++;
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
	}
out:
	free_extent_map(em);
	return 0;
}
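/*
 * Worked example of the stripe math above: with a 64k stripe_len and a
 * two-device RAID0 chunk, offset 200k gives stripe_nr = 3 (via do_div),
 * stripe_offset = 200k - 3 * 64k = 8k, stripe_index = 3 % 2 = 1, so the
 * bio maps to the second device at 1 * 64k + 8k into its dev extent.
 */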
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}
*map_tree
,
1409 u64 logical
, struct page
*page
)
1411 u64 length
= PAGE_CACHE_SIZE
;
1412 return __btrfs_map_block(map_tree
, READ
, logical
, &length
,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_multi_stripe(struct bio *bio, int err)
#else
static int end_bio_multi_stripe(struct bio *bio,
				unsigned int bytes_done, int err)
#endif
{
	struct btrfs_multi_bio *multi = bio->bi_private;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif
	if (err)
		multi->error = err;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;

		if (!err && multi->error)
			err = multi->error;
		kfree(multi);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
		bio_endio(bio, bio->bi_size, err);
#else
		bio_endio(bio, err);
#endif
	} else {
		bio_put(bio);
	}
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}
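/*
 * The completion handler restores the caller's bi_private/bi_end_io
 * only when stripes_pending hits zero, so a mirrored write completes
 * exactly once no matter how many cloned bios it fanned out into.
 */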
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk("mapping failed logical %Lu bio len %Lu "
		       "len %Lu\n", logical, length, map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while(dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;

		bio->bi_bdev = dev->bdev;
		spin_lock(&dev->io_lock);
		dev->total_ios++;
		spin_unlock(&dev->io_lock);
		submit_bio(rw, bio);
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}
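/*
 * Note the cloning pattern above: the first total_devs - 1 stripes get
 * bio_clone() copies and the final stripe reuses first_bio itself, so
 * single-stripe mappings never redirect the completion path at all.
 */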
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid)
{
	struct list_head *head = &root->fs_info->fs_devices->devices;

	return __find_device(head, devid, uuid);
}
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	spin_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	map = kzalloc(sizeof(*map), GFP_NOFS);
	if (!map)
		return -ENOMEM;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid);
		if (!map->stripes[i].dev) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	spin_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	device = btrfs_find_device(root, devid, dev_uuid);
	if (!device) {
		printk("warning devid %Lu not found already\n", devid);
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		list_add(&device->dev_list,
			 &root->fs_info->fs_devices->devices);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;

	ret = btrfs_open_device(device);
	if (ret)
		return ret;

	return 0;
}
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb = root->fs_info->sb_buffer;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	unsigned long sb_ptr;
	u8 *ptr;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	int ret;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			BUG_ON(ret);
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			BUG();
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	return 0;
}
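/*
 * The sys array walk is the bootstrap step: the chunk tree itself lives
 * inside a SYSTEM chunk, so its mapping must come from the superblock's
 * sys_chunk_array before any tree block can be read from disk.
 */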
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while(1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				BUG_ON(ret);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}