/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <uuid/uuid.h>
#include <fcntl.h>
#include <unistd.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "utils.h"

struct stripe {
	struct btrfs_device *dev;
	u64 physical;
};

static inline int nr_parity_stripes(struct map_lookup *map)
{
	if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		return 1;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		return 2;
	else
		return 0;
}

static inline int nr_data_stripes(struct map_lookup *map)
{
	return map->num_stripes - nr_parity_stripes(map);
}

#define is_parity_stripe(x) ( ((x) == BTRFS_RAID5_P_STRIPE) || ((x) == BTRFS_RAID6_Q_STRIPE) )

static LIST_HEAD(fs_uuids);

static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)) {
			return dev;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

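/*
 * Register a scanned device in the global fs_uuids registry. Devices are
 * grouped into a btrfs_fs_devices entry keyed by the filesystem UUID; the
 * entry also tracks the device with the highest generation seen so far
 * (latest_devid/latest_trans) and the lowest devid.
 */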
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->fd = -1;
		device->devid = devid;
		device->generation = found_transid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		device->label = kstrdup(disk_super->label, GFP_NOFS);
		if (!device->label) {
			kfree(device->name);
			kfree(device);
			return -ENOMEM;
		}
		device->total_devs = btrfs_super_num_devices(disk_super);
		device->super_bytes_used = btrfs_super_bytes_used(disk_super);
		device->total_bytes =
			btrfs_stack_device_total_bytes(&disk_super->dev_item);
		device->bytes_used =
			btrfs_stack_device_bytes_used(&disk_super->dev_item);
		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name, path)) {
		char *name = strdup(path);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_device *device;
	int ret = 0;

again:
	if (!fs_devices)
		return ret;
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		if (device->fd != -1) {
			fsync(device->fd);
			if (posix_fadvise(device->fd, 0, 0, POSIX_FADV_DONTNEED))
				fprintf(stderr, "Warning, could not drop caches\n");
			close(device->fd);
			device->fd = -1;
		}
		device->writeable = 0;
		list_del(&device->dev_list);
		/* free the memory */
		free(device->name);
		free(device->label);
		free(device);
	}

	seed_devices = fs_devices->seed;
	fs_devices->seed = NULL;
	if (seed_devices) {
		struct btrfs_fs_devices *orig;

		orig = fs_devices;
		fs_devices = seed_devices;
		list_del(&orig->list);
		free(orig);
		goto again;
	} else {
		list_del(&fs_devices->list);
		free(fs_devices);
	}

	return ret;
}

void btrfs_close_all_devices(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next, struct btrfs_fs_devices,
					list);
		btrfs_close_devices(fs_devices);
	}
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int flags)
{
	int fd;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	int ret;

	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (!device->name) {
			printk("no name for device %llu, skip it now\n", device->devid);
			continue;
		}

		fd = open(device->name, flags);
		if (fd < 0) {
			ret = -errno;
			error("cannot open device '%s': %s", device->name,
			      strerror(errno));
			goto fail;
		}

		if (posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED))
			fprintf(stderr, "Warning, could not drop caches\n");

		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = fd;
		if (device->devid == fs_devices->lowest_devid)
			fs_devices->lowest_bdev = fd;
		device->fd = fd;
		if (flags & O_RDWR)
			device->writeable = 1;
	}
	return 0;
fail:
	btrfs_close_devices(fs_devices);
	return ret;
}

int btrfs_scan_one_device(int fd, const char *path,
			  struct btrfs_fs_devices **fs_devices_ret,
			  u64 *total_devs, u64 super_offset, unsigned sbflags)
{
	struct btrfs_super_block *disk_super;
	char buf[BTRFS_SUPER_INFO_SIZE];
	int ret;
	u64 devid;

	disk_super = (struct btrfs_super_block *)buf;
	ret = btrfs_read_dev_super(fd, disk_super, super_offset, sbflags);
	if (ret < 0)
		return -EIO;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_METADUMP)
		*total_devs = 1;
	else
		*total_devs = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents.
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = root->fs_info->alloc_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found = 0;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(BTRFS_BLOCK_RESERVED_1M_FOR_SUPER, search_start);

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (key.type != BTRFS_DEV_EXTENT_KEY) {
			goto next;
		}

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset,
				  u64 num_bytes, u64 *start, int convert)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * For convert case, just skip search free dev_extent, as caller
	 * is responsible to make sure it's free.
	 */
	if (!convert) {
		ret = find_free_dev_extent(trans, device, path, num_bytes,
					   start);
		if (ret)
			goto err;
	}

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}

static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid = 0;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

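/*
 * Convert a per-device stripe size (calc_size) into the number of usable
 * bytes the chunk exposes: RAID1/DUP keep full copies and expose calc_size,
 * RAID10 exposes calc_size * (num_stripes / sub_stripes), and RAID5/RAID6
 * lose one or two stripes to parity.
 */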
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else if (type & BTRFS_BLOCK_GROUP_RAID5)
		return calc_size * (num_stripes - 1);
	else if (type & BTRFS_BLOCK_GROUP_RAID6)
		return calc_size * (num_stripes - 2);
	else
		return calc_size * num_stripes;
}

static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
	/* TODO, add a way to store the preferred stripe size */
	return BTRFS_STRIPE_LEN;
}

/*
 * btrfs_device_avail_bytes - count bytes available for alloc_chunk
 *
 * It is not equal to "device->total_bytes - device->bytes_used".
 * We do not allocate any chunk in the first 1M of the device, and we are
 * not allowed to allocate any chunk before alloc_start if it is specified.
 * So search for holes from max(1M, alloc_start) to device->total_bytes.
 */
static int btrfs_device_avail_bytes(struct btrfs_trans_handle *trans,
				    struct btrfs_device *device,
				    u64 *avail_bytes)
{
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct extent_buffer *l;
	u64 search_start = root->fs_info->alloc_start;
	u64 search_end = device->total_bytes;
	u64 free_bytes = 0;
	u64 extent_end = 0;
	int ret;
	int slot = 0;

	search_start = max(BTRFS_BLOCK_RESERVED_1M_FOR_SUPER, search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = root->fs_info->alloc_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;
		if (key.objectid > device->devid)
			break;
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;
		if (key.offset > search_end)
			break;
		if (key.offset > search_start)
			free_bytes += key.offset - search_start;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
		if (search_start > search_end)
			break;
next:
		path->slots[0]++;
	}

	if (search_start < search_end)
		free_bytes += search_end - search_start;

	*avail_bytes = free_bytes;
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

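/*
 * Upper bounds on the stripe count of a single chunk item: a regular chunk
 * item must fit inside one leaf, while a SYSTEM chunk item lands in the
 * superblock's sys_chunk_array, where the second macro leaves room for two
 * disk_key + chunk pairs.
 */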
#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
			- sizeof(struct btrfs_item)		\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)

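/*
 * Chunk allocation strategy: pick a target per-device stripe size
 * (calc_size) and limits based on the block group type, build a private
 * list of devices that have at least min_free bytes available, and if too
 * few devices qualify, shrink calc_size or the stripe count and retry via
 * the 'again' label.
 */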
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 calc_size = 8 * 1024 * 1024;
	u64 min_free;
	u64 max_chunk_size = 4 * calc_size;
	u64 avail = 0;
	u64 max_avail = 0;
	u64 percent_max;
	u64 offset;
	int num_stripes = 1;
	int max_stripes = 0;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = BTRFS_STRIPE_LEN;
	struct btrfs_key key;

	if (list_empty(dev_list)) {
		return -ENOSPC;
	}

	if (type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
		    BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
		    BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_DUP)) {
		if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
			calc_size = 8 * 1024 * 1024;
			max_chunk_size = calc_size * 2;
			min_stripe_size = 1 * 1024 * 1024;
			max_stripes = BTRFS_MAX_DEVS_SYS_CHUNK;
		} else if (type & BTRFS_BLOCK_GROUP_DATA) {
			calc_size = 1024 * 1024 * 1024;
			max_chunk_size = 10 * calc_size;
			min_stripe_size = 64 * 1024 * 1024;
			max_stripes = BTRFS_MAX_DEVS(chunk_root);
		} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
			calc_size = 1024 * 1024 * 1024;
			max_chunk_size = 4 * calc_size;
			min_stripe_size = 32 * 1024 * 1024;
			max_stripes = BTRFS_MAX_DEVS(chunk_root);
		}
	}
	if (type & BTRFS_BLOCK_GROUP_RAID1) {
		num_stripes = min_t(u64, 2,
				    btrfs_super_num_devices(info->super_copy));
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & BTRFS_BLOCK_GROUP_DUP) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID5)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
		stripe_len = find_raid56_stripe_len(num_stripes - 1,
				    btrfs_super_stripesize(info->super_copy));
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID6)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 3)
			return -ENOSPC;
		min_stripes = 3;
		stripe_len = find_raid56_stripe_len(num_stripes - 2,
				    btrfs_super_stripesize(info->super_copy));
	}

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (chunk_bytes_by_type(type, calc_size, num_stripes, sub_stripes) >
	    max_chunk_size) {
		calc_size = max_chunk_size;
		calc_size /= num_stripes;
		calc_size /= stripe_len;
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, calc_size, min_stripe_size);

	calc_size /= stripe_len;
	calc_size *= stripe_len;
	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* build a private list of devices we will allocate from */
	while(index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		ret = btrfs_device_avail_bytes(trans, device, &avail);
		if (ret)
			return ret;
		cur = cur->next;
		if (avail >= min_free) {
			list_move_tail(&device->dev_list, &private_devs);
			index++;
			if (type & BTRFS_BLOCK_GROUP_DUP)
				index++;
		} else if (avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		return -ENOSPC;
	}
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &offset);
	if (ret)
		return ret;
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = offset;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);
	index = 0;
	while(index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset, 0);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = *num_bytes;

	ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
	BUG_ON(ret);

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key,
				    chunk, btrfs_chunk_item_size(num_stripes));
		BUG_ON(ret);
	}

	kfree(chunk);
	return ret;
}

/*
 * Alloc a DATA chunk with SINGLE profile.
 *
 * If 'convert' is set, it will alloc a chunk with 1:1 mapping
 * (btrfs logical bytenr == on-disk bytenr)
 * For that case, caller must make sure the chunk and dev_extent are not
 * already occupied.
 */
int btrfs_alloc_data_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *extent_root, u64 *start,
			   u64 num_bytes, u64 type, int convert)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head *dev_list = &info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	u64 calc_size = 8 * 1024 * 1024;
	int num_stripes = 1;
	int sub_stripes = 0;
	int ret;
	int index;
	int stripe_len = BTRFS_STRIPE_LEN;
	struct btrfs_key key;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	if (convert) {
		if (*start != round_down(*start, extent_root->sectorsize)) {
			error("DATA chunk start not sectorsize aligned: %llu",
					(unsigned long long)*start);
			return -EINVAL;
		}
		key.offset = *start;
		dev_offset = *start;
	} else {
		ret = find_next_chunk(chunk_root,
				      BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				      &key.offset);
		if (ret)
			return ret;
	}

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;
	calc_size = num_bytes;

	index = 0;
	cur = dev_list->next;
	device = list_entry(cur, struct btrfs_device, dev_list);

	while (index < num_stripes) {
		struct btrfs_stripe *stripe;

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset, convert);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		index++;
	}

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = num_bytes;

	ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
	BUG_ON(ret);

	kfree(chunk);
	return ret;
}

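/*
 * Number of independent ways a logical range can be read: each RAID1/DUP
 * stripe is a full copy, RAID10 has sub_stripes mirrors per position, and
 * RAID5/RAID6 count as 2/3 because parity allows reconstructing the data.
 */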
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	int ret;

	ce = search_cache_extent(&map_tree->cache_tree, logical);
	if (!ce) {
		fprintf(stderr, "No mapping for %llu-%llu\n",
			(unsigned long long)logical,
			(unsigned long long)logical+len);
		return 1;
	}
	if (ce->start > logical || ce->start + ce->size < logical) {
		fprintf(stderr, "Invalid mapping for %llu-%llu, got "
			"%llu-%llu\n", (unsigned long long)logical,
			(unsigned long long)logical+len,
			(unsigned long long)ce->start,
			(unsigned long long)ce->start + ce->size);
		return 1;
	}
	map = container_of(ce, struct map_lookup, ce);

	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		ret = 3;
	else
		ret = 1;
	return ret;
}

int btrfs_next_bg(struct btrfs_mapping_tree *map_tree, u64 *logical,
		  u64 *size, u64 type)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 cur = *logical;

	ce = search_cache_extent(&map_tree->cache_tree, cur);

	while (ce) {
		/*
		 * only jump to next bg if our cur is not 0
		 * As the initial logical for btrfs_next_bg() is 0, and
		 * if we jump to next bg, we skipped a valid bg.
		 */
		if (cur) {
			ce = next_cache_extent(ce);
			if (!ce)
				return -ENOENT;
		}

		cur = ce->start;
		map = container_of(ce, struct map_lookup, ce);
		if (map->type & type) {
			*logical = ce->start;
			*size = ce->size;
			return 0;
		}
		if (!cur)
			ce = next_cache_extent(ce);
	}

	return -ENOENT;
}

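/*
 * Reverse mapping: given a physical byte offset on a device (optionally
 * restricted to one devid), collect every logical address in the chunk at
 * chunk_start that maps to it. The caller receives a malloc'ed array of
 * logical addresses plus the effective stripe length (rmap_len).
 */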
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;

	ce = search_cache_extent(&map_tree->cache_tree, chunk_start);
	BUG_ON(!ce);
	map = container_of(ce, struct map_lookup, ce);

	length = ce->size;
	rmap_len = map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		length = ce->size / (map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		length = ce->size / map->num_stripes;
	else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			      BTRFS_BLOCK_GROUP_RAID6)) {
		length = ce->size / nr_data_stripes(map);
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = (physical - map->stripes[i].physical) /
			    map->stripe_len;

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = (stripe_nr * map->num_stripes + i) /
				    map->sub_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		} /* else if RAID[56], multiply by nr_data_stripes().
		   * Alternatively, just use rmap_len below instead of
		   * map->stripe_len */

		bytenr = ce->start + stripe_nr * rmap_len;
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr)
			buf[nr++] = bytenr;
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	return 0;
}

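/*
 * In a raid_map, data stripes carry real logical addresses while the P and
 * Q stripes carry the BTRFS_RAID5_P_STRIPE/BTRFS_RAID6_Q_STRIPE markers,
 * sentinel values above any valid logical address; sorting ascending
 * therefore pushes the parity stripes to the end of the array.
 */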
static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_multi_bio *bbio, u64 *raid_map)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < bbio->num_stripes - 1; i++) {
			if (parity_smaller(raid_map[i], raid_map[i+1])) {
				s = bbio->stripes[i];
				l = raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				raid_map[i] = raid_map[i+1];
				bbio->stripes[i+1] = s;
				raid_map[i+1] = l;
				again = 1;
			}
		}
	}
}

int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num,
		    u64 **raid_map_ret)
{
	return __btrfs_map_block(map_tree, rw, logical, length, NULL,
				 multi_ret, mirror_num, raid_map_ret);
}

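/*
 * Core logical-to-physical translation. The offset into the chunk is split
 * as stripe_nr = offset / stripe_len with the remainder in stripe_offset;
 * stripe_nr is then divided among the devices of the profile (e.g.
 * stripe_index = stripe_nr % num_stripes for RAID0) to locate the device
 * and the physical position inside its dev extent.
 */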
int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		      u64 logical, u64 *length, u64 *type,
		      struct btrfs_multi_bio **multi_ret, int mirror_num,
		      u64 **raid_map_ret)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 *raid_map = NULL;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && rw == READ) {
		stripes_allocated = 1;
	}
again:
	ce = search_cache_extent(&map_tree->cache_tree, logical);
	if (!ce) {
		kfree(multi);
		*length = (u64)-1;
		return -ENOENT;
	}
	if (ce->start > logical) {
		kfree(multi);
		*length = ce->start - logical;
		return -ENOENT;
	}

	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;
	}
	map = container_of(ce, struct map_lookup, ce);
	offset = logical - ce->start;

	if (rw == WRITE) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
		}
	}
	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)
	    && multi_ret && ((rw & WRITE) || mirror_num > 1) && raid_map_ret) {
		/* RAID[56] write or recovery. Return all stripes */
		stripes_required = map->num_stripes;

		/* Only allocate the map if we've already got a large enough multi_ret */
		if (stripes_allocated >= stripes_required) {
			raid_map = kmalloc(sizeof(u64) * map->num_stripes,
					   GFP_NOFS);
			if (!raid_map) {
				kfree(multi);
				return -ENOMEM;
			}
		}
	}

	/* if our multi bio struct is too small, back off and try again */
	if (multi_ret && stripes_allocated < stripes_required) {
		stripes_allocated = stripes_required;
		kfree(multi);
		multi = NULL;
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = stripe_nr / map->stripe_len;

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe*/
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, ce->size - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = ce->size - offset;
	}

	if (!multi_ret)
		goto out;

	multi->num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw == WRITE)
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else
			stripe_index = stripe_nr % map->num_stripes;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = stripe_nr % factor;
		stripe_index *= map->sub_stripes;

		if (rw == WRITE)
			multi->num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;

		stripe_nr = stripe_nr / factor;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw == WRITE)
			multi->num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6)) {
		if (raid_map) {
			int rot;
			u64 tmp;
			u64 raid56_full_stripe_start;
			u64 full_stripe_len = nr_data_stripes(map) * map->stripe_len;

			/*
			 * align the start of our data stripe in the logical
			 * address space
			 */
			raid56_full_stripe_start = offset / full_stripe_len;
			raid56_full_stripe_start *= full_stripe_len;

			/* get the data stripe number */
			stripe_nr = raid56_full_stripe_start / map->stripe_len;
			stripe_nr = stripe_nr / nr_data_stripes(map);

			/* Work out the disk rotation on this stripe-set */
			rot = stripe_nr % map->num_stripes;

			/* Fill in the logical address of each stripe */
			tmp = stripe_nr * nr_data_stripes(map);

			for (i = 0; i < nr_data_stripes(map); i++)
				raid_map[(i+rot) % map->num_stripes] =
					ce->start + (tmp + i) * map->stripe_len;

			raid_map[(i+rot) % map->num_stripes] = BTRFS_RAID5_P_STRIPE;
			if (map->type & BTRFS_BLOCK_GROUP_RAID6)
				raid_map[(i+rot+1) % map->num_stripes] = BTRFS_RAID6_Q_STRIPE;

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
			multi->num_stripes = map->num_stripes;
		} else {
			stripe_index = stripe_nr % nr_data_stripes(map);
			stripe_nr = stripe_nr / nr_data_stripes(map);

			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			if (mirror_num > 1)
				stripe_index = nr_data_stripes(map) + mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			stripe_index = (stripe_nr + stripe_index) % map->num_stripes;
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = stripe_nr % map->num_stripes;
		stripe_nr = stripe_nr / map->num_stripes;
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < multi->num_stripes; i++) {
		multi->stripes[i].physical =
			map->stripes[stripe_index].physical + stripe_offset +
			stripe_nr * map->stripe_len;
		multi->stripes[i].dev = map->stripes[stripe_index].dev;
		stripe_index++;
	}
	*multi_ret = multi;

	if (type)
		*type = map->type;
	if (raid_map) {
		sort_parity_stripes(multi, raid_map);
		*raid_map_ret = raid_map;
	}
out:
	return 0;
}

struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    (!memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE) ||
		     root->fs_info->ignore_fsid_mismatch)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}

struct btrfs_device *
btrfs_find_device_by_devid(struct btrfs_fs_devices *fs_devices,
			   u64 devid, int instance)
{
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *dev;
	int num_found = 0;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid && num_found++ == instance)
			return dev;
	}
	return NULL;
}

int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	/*
	 * During chunk recovering, we may fail to find block group's
	 * corresponding chunk, we will rebuild it later
	 */
	ce = search_cache_extent(&map_tree->cache_tree, chunk_offset);
	if (!root->fs_info->is_chunk_recover)
		BUG_ON(!ce);
	else
		return 0;

	map = container_of(ce, struct map_lookup, ce);
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}

	return readonly;
}

static struct btrfs_device *fill_missing_device(u64 devid)
{
	struct btrfs_device *device;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	device->devid = devid;
	device->fd = -1;

	return device;
}

/*
 * slot == -1: SYSTEM chunk
 * return -EIO on error, otherwise return 0
 */
int btrfs_check_chunk_valid(struct btrfs_root *root,
			    struct extent_buffer *leaf,
			    struct btrfs_chunk *chunk,
			    int slot, u64 logical)
{
	u64 length;
	u64 stripe_len;
	u16 num_stripes;
	u16 sub_stripes;
	u64 type;

	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);

	/*
	 * These validity checks may be insufficient to cover every corner
	 * case.
	 */
	if (!IS_ALIGNED(logical, root->sectorsize)) {
		error("invalid chunk logical %llu", logical);
		return -EIO;
	}
	if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) {
		error("invalid chunk sectorsize %llu",
		      (unsigned long long)btrfs_chunk_sector_size(leaf, chunk));
		return -EIO;
	}
	if (!length || !IS_ALIGNED(length, root->sectorsize)) {
		error("invalid chunk length %llu", length);
		return -EIO;
	}
	if (stripe_len != BTRFS_STRIPE_LEN) {
		error("invalid chunk stripe length: %llu", stripe_len);
		return -EIO;
	}
	/* Check on chunk item type */
	if (slot == -1 && (type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
		error("invalid chunk type %llu", type);
		return -EIO;
	}
	if (type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
		     BTRFS_BLOCK_GROUP_PROFILE_MASK)) {
		error("unrecognized chunk type: %llu",
		      ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			BTRFS_BLOCK_GROUP_PROFILE_MASK) & type);
		return -EIO;
	}
	/*
	 * Btrfs_chunk contains at least one stripe, and for sys_chunk
	 * it can't exceed the system chunk array size.
	 * For normal chunk, it should match its chunk item size.
	 */
	if (num_stripes < 1 ||
	    (slot == -1 && sizeof(struct btrfs_stripe) * num_stripes >
	     BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) ||
	    (slot >= 0 && sizeof(struct btrfs_stripe) * (num_stripes - 1) >
	     btrfs_item_size_nr(leaf, slot))) {
		error("invalid num_stripes: %u", num_stripes);
		return -EIO;
	}
	/*
	 * Device number check against profile
	 */
	if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes == 0) ||
	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
	     num_stripes != 1)) {
		error("Invalid num_stripes:sub_stripes %u:%u for profile %llu",
		      num_stripes, sub_stripes,
		      type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EIO;
	}

	return 0;
}

/*
 * Slot is used to verify the chunk item is valid
 *
 * For sys chunk in superblock, pass -1 to indicate sys chunk.
 */
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk, int slot)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct cache_extent *ce;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	/* Validation check */
	ret = btrfs_check_chunk_valid(root, leaf, chunk, slot, logical);
	if (ret) {
		error("%s checksums match, but it has an invalid chunk, %s",
		      (slot == -1) ? "Superblock" : "Metadata",
		      (slot == -1) ? "try btrfsck --repair -s <superblock> ie, 0,1,2" : "");
		return ret;
	}

	ce = search_cache_extent(&map_tree->cache_tree, logical);

	/* already mapped? */
	if (ce && ce->start <= logical && ce->start + ce->size > logical) {
		return 0;
	}

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map)
		return -ENOMEM;

	map->ce.start = logical;
	map->ce.size = length;
	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);

	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev) {
			map->stripes[i].dev = fill_missing_device(devid);
			printf("warning, device %llu is missing\n",
			       (unsigned long long)devid);
			list_add(&map->stripes[i].dev->dev_list,
				 &root->fs_info->fs_devices->devices);
		}
	}
	ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
	BUG_ON(ret);

	return 0;
}

static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}

static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		/* missing all seed devices */
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices) {
			ret = -ENOMEM;
			goto out;
		}
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, fsid, BTRFS_FSID_SIZE);
	}

	ret = btrfs_open_devices(fs_devices, O_RDONLY);
	if (ret)
		goto out;

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	return ret;
}

static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret = 0;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret)
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		device->fd = -1;
		list_add(&device->dev_list,
			 &root->fs_info->fs_devices->devices);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	return ret;
}

int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root->fs_info,
					  BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	write_extent_buffer(sb, super_copy, 0, sizeof(*super_copy));
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_array_offset;
			/*
			 * At least one btrfs_chunk with one stripe must be
			 * present, exact stripe count check comes afterwards
			 */
			len = btrfs_chunk_item_size(1);
			if (cur_offset + len > array_size)
				goto out_short_read;

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			if (!num_stripes) {
				printk(
	"ERROR: invalid number of stripes %u in sys_array at offset %u\n",
					num_stripes, cur_offset);
				ret = -EIO;
				break;
			}

			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;

			ret = read_one_chunk(root, &key, sb, chunk, -1);
			if (ret)
				break;
		} else {
			printk(
	"ERROR: unexpected item type %u in sys_array at offset %u\n",
				(u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	free_extent_buffer(sb);
	return ret;

out_short_read:
	printk("ERROR: sys_array too short to read %u bytes at offset %u\n",
			len, cur_offset);
	free_extent_buffer(sb);
	return -EIO;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while(1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(root, leaf, dev_item);
			BUG_ON(ret);
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk,
					     slot);
			BUG_ON(ret);
		}
		path->slots[0]++;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

struct list_head *btrfs_scanned_uuids(void)
{
	return &fs_uuids;
}

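/*
 * Read-modify-write helper for partial RAID56 stripe updates: read the
 * whole stripe-sized eb from disk, then overwrite the region covered by
 * orig_eb, clamping the copy to the overlap of the two buffers.
 */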
static int rmw_eb(struct btrfs_fs_info *info,
		  struct extent_buffer *eb, struct extent_buffer *orig_eb)
{
	int ret;
	unsigned long orig_off = 0;
	unsigned long dest_off = 0;
	unsigned long copy_len = eb->len;

	ret = read_whole_eb(info, eb, 0);
	if (ret)
		return ret;

	if (eb->start + eb->len <= orig_eb->start ||
	    eb->start >= orig_eb->start + orig_eb->len)
		return 0;
	/*
	 * | ----- orig_eb ------- |
	 *         | ----- stripe -------  |
	 *         | ----- orig_eb ------- |
	 *              | ----- orig_eb ------- |
	 */
	if (eb->start > orig_eb->start)
		orig_off = eb->start - orig_eb->start;
	if (orig_eb->start > eb->start)
		dest_off = orig_eb->start - eb->start;

	if (copy_len > orig_eb->len - orig_off)
		copy_len = orig_eb->len - orig_off;
	if (copy_len > eb->len - dest_off)
		copy_len = eb->len - dest_off;

	memcpy(eb->data + dest_off, orig_eb->data + orig_off, copy_len);
	return 0;
}

static int split_eb_for_raid56(struct btrfs_fs_info *info,
			       struct extent_buffer *orig_eb,
			       struct extent_buffer **ebs,
			       u64 stripe_len, u64 *raid_map,
			       int num_stripes)
{
	struct extent_buffer **tmp_ebs;
	u64 start = orig_eb->start;
	u64 this_eb_start;
	int i;
	int ret = 0;

	tmp_ebs = calloc(num_stripes, sizeof(*tmp_ebs));
	if (!tmp_ebs)
		return -ENOMEM;

	/* Alloc memory in a row for data stripes */
	for (i = 0; i < num_stripes; i++) {
		if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
			break;

		tmp_ebs[i] = calloc(1, sizeof(**tmp_ebs) + stripe_len);
		if (!tmp_ebs[i]) {
			ret = -ENOMEM;
			goto clean_up;
		}
	}
	for (i = 0; i < num_stripes; i++) {
		struct extent_buffer *eb = tmp_ebs[i];

		if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
			break;

		eb->start = raid_map[i];
		eb->len = stripe_len;
		eb->refs = 1;
		eb->flags = 0;
		eb->fd = -1;
		eb->dev_bytenr = (u64)-1;

		this_eb_start = raid_map[i];

		if (start > this_eb_start ||
		    start + orig_eb->len < this_eb_start + stripe_len) {
			ret = rmw_eb(info, eb, orig_eb);
			if (ret)
				goto clean_up;
		} else {
			memcpy(eb->data, orig_eb->data + eb->start - start,
			       stripe_len);
		}
		ebs[i] = eb;
	}
	free(tmp_ebs);
	return ret;
clean_up:
	for (i = 0; i < num_stripes; i++)
		free(tmp_ebs[i]);
	free(tmp_ebs);
	return ret;
}

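/*
 * Full-stripe write for RAID5/6: the source eb is split into per-device
 * stripe buffers, parity is computed into the P (and for RAID6 the Q)
 * buffer with raid5_gen_result()/raid6_gen_syndrome(), and every stripe
 * is then written to its device at the physical offset from 'multi'.
 */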
int write_raid56_with_parity(struct btrfs_fs_info *info,
			     struct extent_buffer *eb,
			     struct btrfs_multi_bio *multi,
			     u64 stripe_len, u64 *raid_map)
{
	struct extent_buffer **ebs, *p_eb = NULL, *q_eb = NULL;
	int i;
	int ret;
	int alloc_size = eb->len;
	void **pointers;

	ebs = malloc(sizeof(*ebs) * multi->num_stripes);
	pointers = malloc(sizeof(*pointers) * multi->num_stripes);
	if (!ebs || !pointers) {
		free(ebs);
		free(pointers);
		return -ENOMEM;
	}

	if (stripe_len > alloc_size)
		alloc_size = stripe_len;

	ret = split_eb_for_raid56(info, eb, ebs, stripe_len, raid_map,
				  multi->num_stripes);
	if (ret)
		goto out;

	for (i = 0; i < multi->num_stripes; i++) {
		struct extent_buffer *new_eb;
		if (raid_map[i] < BTRFS_RAID5_P_STRIPE) {
			ebs[i]->dev_bytenr = multi->stripes[i].physical;
			ebs[i]->fd = multi->stripes[i].dev->fd;
			multi->stripes[i].dev->total_ios++;
			if (ebs[i]->start != raid_map[i]) {
				ret = -EINVAL;
				goto out_free_split;
			}
			continue;
		}
		new_eb = malloc(sizeof(*eb) + alloc_size);
		if (!new_eb) {
			ret = -ENOMEM;
			goto out_free_split;
		}
		new_eb->dev_bytenr = multi->stripes[i].physical;
		new_eb->fd = multi->stripes[i].dev->fd;
		multi->stripes[i].dev->total_ios++;
		new_eb->len = stripe_len;

		if (raid_map[i] == BTRFS_RAID5_P_STRIPE)
			p_eb = new_eb;
		else if (raid_map[i] == BTRFS_RAID6_Q_STRIPE)
			q_eb = new_eb;
	}
	if (q_eb) {
		ebs[multi->num_stripes - 2] = p_eb;
		ebs[multi->num_stripes - 1] = q_eb;

		for (i = 0; i < multi->num_stripes; i++)
			pointers[i] = ebs[i]->data;

		raid6_gen_syndrome(multi->num_stripes, stripe_len, pointers);
	} else {
		ebs[multi->num_stripes - 1] = p_eb;
		for (i = 0; i < multi->num_stripes; i++)
			pointers[i] = ebs[i]->data;
		ret = raid5_gen_result(multi->num_stripes, stripe_len,
				       multi->num_stripes - 1, pointers);
		if (ret < 0)
			goto out_free_split;
	}

	for (i = 0; i < multi->num_stripes; i++) {
		ret = write_extent_to_disk(ebs[i]);
		if (ret < 0)
			goto out_free_split;
	}

out_free_split:
	for (i = 0; i < multi->num_stripes; i++) {
		if (raid_map[i] < BTRFS_RAID5_P_STRIPE)
			free(ebs[i]);
	}
	free(p_eb);
	free(q_eb);
out:
	free(ebs);
	free(pointers);

	return ret;
}