/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#define _XOPEN_SOURCE 600
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <uuid/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
struct stripe {
	struct btrfs_device *dev;
	u64 physical;
};

struct map_lookup {
	struct cache_extent ce;
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static LIST_HEAD(fs_uuids);
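
/*
 * Note: map_lookup embeds a cache_extent plus a flexible array of
 * btrfs_bio_stripe, so allocations must use map_lookup_size(), e.g.
 *
 *	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
 *
 * as done in btrfs_alloc_chunk(), btrfs_bootstrap_super_map() and
 * read_one_chunk() below.
 */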
static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)) {
			return dev;
		}
	}
	return NULL;
}
static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kmalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		device->label = kstrdup(disk_super->label, GFP_NOFS);
		device->total_devs = btrfs_super_num_devices(disk_super);
		device->super_bytes_used = btrfs_super_bytes_used(disk_super);
		device->total_bytes =
			btrfs_stack_device_total_bytes(&disk_super->dev_item);
		device->bytes_used =
			btrfs_stack_device_bytes_used(&disk_super->dev_item);
		list_add(&device->dev_list, &fs_devices->devices);
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		close(device->fd);
	}
	return 0;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int flags)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	int ret = 0;
	int fd;

	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		fd = open(device->name, flags);
		if (fd < 0) {
			ret = -1;
			goto fail;
		}
		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = fd;
		if (device->devid == fs_devices->lowest_devid)
			fs_devices->lowest_bdev = fd;
		device->fd = fd;
	}
	return ret;
fail:
	btrfs_close_devices(fs_devices);
	return ret;
}
int btrfs_scan_one_device(int fd, const char *path,
			  struct btrfs_fs_devices **fs_devices_ret,
			  u64 *total_devs, u64 super_offset)
{
	struct btrfs_super_block *disk_super;
	char *buf;
	char uuidbuf[37];
	u64 devid;
	int ret;

	buf = malloc(4096);
	if (!buf)
		return -ENOMEM;

	ret = pread(fd, buf, 4096, super_offset);
	if (ret != 4096) {
		ret = -EIO;
		goto error;
	}
	disk_super = (struct btrfs_super_block *)buf;
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		ret = -ENOENT;
		goto error;
	}
	devid = le64_to_cpu(disk_super->dev_item.devid);
	*total_devs = btrfs_super_num_devices(disk_super);
	uuid_unparse(disk_super->fsid, uuidbuf);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
error:
	free(buf);
	return ret;
}
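
/*
 * Typical use (sketch): the caller opens the device node and passes the
 * primary super block offset, e.g.
 *
 *	fd = open(path, O_RDONLY);
 *	ret = btrfs_scan_one_device(fd, path, &fs_devices,
 *				    &total_devs, BTRFS_SUPER_INFO_OFFSET);
 *
 * On success the device is registered in the global fs_uuids list via
 * device_list_add() above.
 */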
/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found = 0;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
			goto next;
		}

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}
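
/*
 * In short: find_free_dev_extent() walks one device's DEV_EXTENT items in
 * key order and looks for a gap of at least num_bytes between last_byte
 * (end of the previous extent) and key.offset (start of the next item),
 * never dipping below the 1MB superblock reserve.  The chosen hole start
 * is returned through *start.
 */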
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset,
			   u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret)
		goto err;

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}
static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid) {
			*offset = 0;
		} else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
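
/*
 * find_next_chunk() returns the logical offset at which a new chunk item
 * can be keyed: the offset of the last existing CHUNK_ITEM plus its
 * length, or 0 when no chunk exists yet for this objectid.
 */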
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid = 0;
	int ret;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
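
/*
 * Layout of super_copy->sys_chunk_array after this append (packed pairs,
 * no padding):
 *
 *	[ btrfs_disk_key | btrfs_chunk (+ stripes) ][ btrfs_disk_key | ... ]
 *
 * btrfs_read_sys_array() below walks the array with exactly the same
 * key + item_size stride.
 */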
static u64 div_factor(u64 num, int factor)
{
	num *= factor;
	num /= 10;
	return num;
}

static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}
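
/*
 * Example: with calc_size = 8MB, a RAID1/DUP chunk still holds 8MB of
 * usable data (the extra stripes are copies), RAID10 with num_stripes = 4
 * and sub_stripes = 2 holds 8MB * (4 / 2) = 16MB, and RAID0/single with
 * 4 stripes holds 8MB * 4 = 32MB.
 */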
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &extent_root->fs_info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 physical;
	u64 calc_size = 8 * 1024 * 1024;
	u64 min_free;
	u64 max_chunk_size = 4 * calc_size;
	u64 avail;
	u64 max_avail = 0;
	u64 percent_max;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	if (list_empty(dev_list)) {
		return -ENOSPC;
	}

	if (type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
		    BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_DUP)) {
		if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
			calc_size = 8 * 1024 * 1024;
			max_chunk_size = calc_size * 2;
			min_stripe_size = 1 * 1024 * 1024;
		} else if (type & BTRFS_BLOCK_GROUP_DATA) {
			calc_size = 1024 * 1024 * 1024;
			max_chunk_size = 10 * calc_size;
			min_stripe_size = 64 * 1024 * 1024;
		} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
			calc_size = 1024 * 1024 * 1024;
			max_chunk_size = 4 * calc_size;
			min_stripe_size = 32 * 1024 * 1024;
		}
	}
	if (type & BTRFS_BLOCK_GROUP_RAID1) {
		num_stripes = min_t(u64, 2,
				    btrfs_super_num_devices(&info->super_copy));
		min_stripes = 2;
	}
	if (type & BTRFS_BLOCK_GROUP_DUP) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (chunk_bytes_by_type(type, calc_size, num_stripes, sub_stripes) >
	    max_chunk_size) {
		calc_size = max_chunk_size;
		calc_size /= num_stripes;
		calc_size /= stripe_len;
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, calc_size, min_stripe_size);

	calc_size /= stripe_len;
	calc_size *= stripe_len;
	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* build a private list of devices we will allocate from */
	while(index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		avail = device->total_bytes - device->bytes_used;
		cur = cur->next;
		if (avail >= min_free) {
			list_move_tail(&device->dev_list, &private_devs);
			index++;
			if (type & BTRFS_BLOCK_GROUP_DUP)
				index++;
		} else if (avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		return -ENOSPC;
	}

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);
	if (ret)
		return ret;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);
	index = 0;
	while(index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = *num_bytes;

	ret = insert_existing_cache_extent(
			   &extent_root->fs_info->mapping_tree.cache_tree,
			   &map->ce);
	BUG_ON(ret);

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key,
				    chunk, btrfs_chunk_item_size(num_stripes));
		BUG_ON(ret);
	}

	kfree(chunk);
	return ret;
}
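
/*
 * Summary of the allocation above: pick a per-stripe size by block group
 * type, build a private list of devices with at least min_free bytes
 * available, carve one dev extent of calc_size from each, then publish
 * the chunk both as an item in the chunk tree and as a map_lookup in the
 * logical->physical mapping tree (plus the super block array for SYSTEM
 * chunks).
 */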
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	cache_tree_init(&tree->cache_tree);
}
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 offset;
	int ret;

	ce = find_first_cache_extent(&map_tree->cache_tree, logical);
	BUG_ON(!ce);
	BUG_ON(ce->start > logical || ce->start + ce->size < logical);
	map = container_of(ce, struct map_lookup, ce);

	offset = logical - ce->start;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	return ret;
}
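
/*
 * i.e. the number of physical copies of a logical byte: every stripe for
 * DUP/RAID1, one copy per mirror pair (sub_stripes) for RAID10, and a
 * single copy for everything else.
 */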
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && rw == READ) {
		stripes_allocated = 1;
	}
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;
	}

	ce = find_first_cache_extent(&map_tree->cache_tree, logical);
	BUG_ON(!ce);
	BUG_ON(ce->start > logical || ce->start + ce->size < logical);
	map = container_of(ce, struct map_lookup, ce);
	offset = logical - ce->start;

	if (rw == WRITE) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
		}
	}
	/* if our multi bio struct is too small, back off and try again */
	if (multi_ret && rw == WRITE &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = stripe_nr / map->stripe_len;

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe*/
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, ce->size - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = ce->size - offset;
	}
	if (!multi_ret)
		return 0;

	multi->num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw == WRITE)
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else
			stripe_index = stripe_nr % map->num_stripes;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = stripe_nr % factor;
		stripe_index *= map->sub_stripes;

		if (rw == WRITE)
			multi->num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else
			stripe_index = stripe_nr % map->sub_stripes;

		stripe_nr = stripe_nr / factor;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw == WRITE)
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = stripe_nr % map->num_stripes;
		stripe_nr = stripe_nr / map->num_stripes;
	}
	BUG_ON(stripe_index >= map->num_stripes);

	BUG_ON(stripe_index != 0 && multi->num_stripes > 1);
	for (i = 0; i < multi->num_stripes; i++) {
		multi->stripes[i].physical =
			map->stripes[stripe_index].physical + stripe_offset +
			stripe_nr * map->stripe_len;
		multi->stripes[i].dev = map->stripes[stripe_index].dev;
		stripe_index++;
	}
	*multi_ret = multi;
	return 0;
}
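
/*
 * Worked example (RAID0, stripe_len = 64K, 2 stripes): a logical offset
 * 192K into the chunk gives stripe_nr = 3 and stripe_offset = 0; the
 * final branch then yields stripe_index = 3 % 2 = 1 and stripe_nr =
 * 3 / 2 = 1, so the I/O goes to the second device at its dev extent
 * start + 1 * 64K.
 */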
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid)
{
	struct list_head *head = &root->fs_info->fs_devices->devices;

	return __find_device(head, devid, uuid);
}
int btrfs_bootstrap_super_map(struct btrfs_mapping_tree *map_tree,
			      struct btrfs_fs_devices *fs_devices)
{
	struct map_lookup *map;
	u64 logical = BTRFS_SUPER_INFO_OFFSET;
	u64 length = BTRFS_SUPER_INFO_SIZE;
	int num_stripes = 0;
	int sub_stripes = 0;
	int ret;
	int i = 0;
	struct list_head *cur;

	list_for_each(cur, &fs_devices->devices) {
		num_stripes++;
	}
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map)
		return -ENOMEM;

	map->ce.start = logical;
	map->ce.size = length;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;
	map->io_width = length;
	map->io_align = length;
	map->sector_size = length;
	map->stripe_len = length;
	map->type = BTRFS_BLOCK_GROUP_RAID1;

	list_for_each(cur, &fs_devices->devices) {
		struct btrfs_device *device = list_entry(cur,
							 struct btrfs_device,
							 dev_list);
		map->stripes[i].physical = logical;
		map->stripes[i].dev = device;
		i++;
	}
	ret = insert_existing_cache_extent(&map_tree->cache_tree, &map->ce);
	if (ret == -EEXIST) {
		struct cache_extent *old;
		struct map_lookup *old_map;
		old = find_cache_extent(&map_tree->cache_tree, logical, length);
		old_map = container_of(old, struct map_lookup, ce);
		remove_cache_extent(&map_tree->cache_tree, old);
		kfree(old_map);
		ret = insert_existing_cache_extent(&map_tree->cache_tree,
						   &map->ce);
	}
	BUG_ON(ret);
	return 0;
}
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct cache_extent *ce;
	u64 logical;
	u64 length;
	u64 devid;
	u64 super_offset_diff = 0;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	if (logical < BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE) {
		super_offset_diff = BTRFS_SUPER_INFO_OFFSET +
			BTRFS_SUPER_INFO_SIZE - logical;
		logical = BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE;
	}

	ce = find_first_cache_extent(&map_tree->cache_tree, logical);

	/* already mapped? */
	if (ce && ce->start <= logical && ce->start + ce->size > logical) {
		return 0;
	}

	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map)
		return -ENOMEM;

	map->ce.start = logical;
	map->ce.size = length - super_offset_diff;
	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);

	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i) +
			super_offset_diff;
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid);
		if (!map->stripes[i].dev) {
			kfree(map);
			return -EIO;
		}
	}
	ret = insert_existing_cache_extent(&map_tree->cache_tree, &map->ce);
	BUG_ON(ret);

	return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret = 0;
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	device = btrfs_find_device(root, devid, dev_uuid);
	if (!device) {
		printk("warning devid %llu not found already\n",
		       (unsigned long long)devid);
		device = kmalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		device->total_ios = 0;
		list_add(&device->dev_list,
			 &root->fs_info->fs_devices->devices);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	return ret;
}
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb = root->fs_info->sb_buffer;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u8 *ptr;
	unsigned long sb_ptr;
	u32 cur;
	int ret;

	array_size = btrfs_super_sys_array_size(super_copy);

	/*
	 * we do this loop twice, once for the device items and
	 * once for all of the chunks.  This way there are device
	 * structs filled in for every chunk
	 */
	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			BUG_ON(ret);
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			BUG();
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	return 0;
}
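
/*
 * The parse above mirrors btrfs_add_system_chunk(): read a btrfs_disk_key,
 * advance by sizeof(*disk_key), then (for CHUNK_ITEM keys) read the chunk
 * and advance by btrfs_chunk_item_size(num_stripes), until array_size
 * bytes have been consumed.
 */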
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				BUG_ON(ret);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			BUG_ON(ret);
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

struct list_head *btrfs_scanned_uuids(void)
{
	return &fs_uuids;
}