/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <asm/div64.h>
#include "extent_map.h"
#include "transaction.h"
#include "print-tree.h"
#include "async-thread.h"
struct map_lookup {
	/* ... */
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))
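/*
 * Illustrative note (not in the original source): map_lookup ends in a
 * flexible array, so a mapping that covers N stripes is allocated with
 * map_lookup_size(N); e.g. kmalloc(map_lookup_size(2), GFP_NOFS) reserves
 * room for exactly two btrfs_bio_stripe entries after the header fields.
 */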
static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}
static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->alloc_mutex);
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
	mutex_unlock(&root->fs_info->alloc_mutex);
}
int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;
	struct list_head *uuid_cur;
	struct list_head *devices_cur;
	struct btrfs_device *dev;

	list_for_each(uuid_cur, &fs_uuids) {
		fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
					list);
		while (!list_empty(&fs_devices->devices)) {
			devices_cur = fs_devices->devices.next;
			dev = list_entry(devices_cur, struct btrfs_device,
					 dev_list);
			close_bdev_excl(dev->bdev);
			fs_devices->open_devices--;
			list_del(&dev->dev_list);
		}
	}
	return 0;
}
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static int noinline run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct bio *tail;
	struct bio *cur;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	unsigned long num_run = 0;
	unsigned long limit;

	bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	spin_lock(&device->io_lock);

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	pending = device->pending_bios;
	tail = device->pending_bio_tail;
	WARN_ON(pending && !tail);
	device->pending_bios = NULL;
	device->pending_bio_tail = NULL;

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (pending)
		device->running_pending = 1;
	else
		device->running_pending = 0;
	spin_unlock(&device->io_lock);

	while (pending) {
		cur = pending;
		pending = pending->bi_next;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		submit_bio(cur->bi_rw, cur);
		num_run++;

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi)) {
			struct bio *old_head;

			spin_lock(&device->io_lock);

			old_head = device->pending_bios;
			device->pending_bios = pending;
			if (device->pending_bio_tail)
				tail->bi_next = old_head;

			device->pending_bio_tail = tail;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			break;
		}
	}
	return 0;
}
void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
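/*
 * Register one scanned device.  The fs_devices entry for the fsid in the
 * superblock is looked up (and created if this is the first device seen
 * for that filesystem), a btrfs_device is added for new devids, and the
 * device with the highest generation is remembered as the latest one.
 */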
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}

	device = __find_device(&fs_devices->devices, devid,
			       disk_super->dev_item.uuid);
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);

		list_add(&device->dev_list, &fs_devices->devices);
		list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
		fs_devices->num_devices++;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (!device->in_fs_metadata) {
			struct block_device *bdev;
			list_del(&device->dev_list);
			list_del(&device->dev_alloc_list);
			fs_devices->num_devices--;
			if (device->bdev) {
				bdev = device->bdev;
				fs_devices->open_devices--;
				mutex_unlock(&uuid_mutex);
				close_bdev_excl(bdev);
				mutex_lock(&uuid_mutex);
			}
		}
	}
	mutex_unlock(&uuid_mutex);
	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev) {
			close_bdev_excl(device->bdev);
			fs_devices->open_devices--;
		}
		device->in_fs_metadata = 0;
	}
	fs_devices->mounted = 0;
	mutex_unlock(&uuid_mutex);
	return 0;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       int flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	u64 transid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	if (fs_devices->mounted)
		goto out;

	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);

		bdev = open_bdev_excl(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk("open %s failed\n", device->name);
			continue;
		}
		set_blocksize(bdev, 4096);

		bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
			    sizeof(disk_super->magic)))
			goto error_close;

		devid = le64_to_cpu(disk_super->dev_item.devid);
		if (devid != device->devid)
			goto error_close;

		transid = btrfs_super_generation(disk_super);
		if (!latest_transid || transid > latest_transid) {
			latest_devid = devid;
			latest_transid = transid;
			latest_bdev = bdev;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		fs_devices->open_devices++;
		continue;

error_close:
		close_bdev_excl(bdev);
	}

	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->mounted = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}
int btrfs_scan_one_device(const char *path, int flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	u64 devid;
	u64 transid;
	int ret;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_excl(path, flags, holder);

	ret = set_blocksize(bdev, 4096);

	bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);

	disk_super = (struct btrfs_super_block *)bh->b_data;
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		goto error_close;
	}
	devid = le64_to_cpu(disk_super->dev_item.devid);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk("device label %s ", disk_super->label);

	/* FIXME, make a real uuid parser */
	printk("device fsid %llx-%llx ",
	       *(unsigned long long *)disk_super->fsid,
	       *(unsigned long long *)(disk_super->fsid + 8));

	printk("devid %Lu transid %Lu %s\n", devid, transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

error_close:
	close_bdev_excl(bdev);

	mutex_unlock(&uuid_mutex);
	return ret;
}
/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
					 struct btrfs_device *device,
					 struct btrfs_path *path,
					 u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	u64 hole_size;
	u64 last_byte = 0;
	int slot;
	int ret;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret > 0)
		ret = btrfs_previous_item(root, path, 0, key.type);
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto check_pending;
			continue;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto check_pending;

		if (key.offset >= search_start && key.offset > last_byte &&
		    dev_extent) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
			goto next;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
	}

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}
	*start = search_start;

	*start = last_byte > search_start ?
		 last_byte : search_start;
	if (search_end <= *start) {
		ret = -ENOSPC;
		goto error;
	}

check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}
int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	return ret;
}
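/*
 * Allocate a device extent of num_bytes for the given chunk: find a free
 * range on the device with find_free_dev_extent(), insert a
 * BTRFS_DEV_EXTENT_KEY item recording which chunk tree/objectid/offset owns
 * it, and return the start of the new extent through *start.
 */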
int noinline btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				    struct btrfs_device *device,
				    u64 chunk_tree, u64 chunk_objectid,
				    u64 chunk_offset,
				    u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
	return ret;
}
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}

	btrfs_free_path(path);
	return ret;
}
static noinline int find_next_devid(struct btrfs_root *root,
				    struct btrfs_path *path, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);

	btrfs_item_key_to_cpu(path->nodes[0], &found_key,
			      path->slots[0]);
	*objectid = found_key.offset + 1;

	btrfs_release_path(root, path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid = 0;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	ret = find_next_devid(root, path, &free_devid);

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
	return ret;
}
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct block_device *bdev = device->bdev;
	struct btrfs_device *next_dev;
	struct btrfs_key key;
	u64 total_bytes;
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	trans = btrfs_start_transaction(root, 1);
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

	ret = btrfs_del_item(trans, root, path);

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	list_del_init(&device->dev_list);
	list_del_init(&device->dev_alloc_list);
	fs_devices = root->fs_info->fs_devices;

	next_dev = list_entry(fs_devices->devices.next, struct btrfs_device,
			      dev_list);
	if (bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_dev->bdev;
	if (bdev == fs_devices->latest_bdev)
		fs_devices->latest_bdev = next_dev->bdev;

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes - 1);

	btrfs_free_path(path);

	btrfs_commit_transaction(trans, root);
	return ret;
}
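/*
 * Remove a device from the filesystem.  The RAID profile limits are
 * checked first (four devices minimum for raid10, two for raid1), the
 * literal path "missing" selects a device that has no backing bdev,
 * the device is shrunk to zero so all chunks migrate off it, its dev
 * item is deleted, and the superblock magic on the old device is wiped
 * so it is no longer detected as part of the filesystem.
 */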
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	mutex_lock(&root->fs_info->volume_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		    root->fs_info->avail_system_alloc_bits |
		    root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    btrfs_super_num_devices(&root->fs_info->super_copy) <= 4) {
		printk("btrfs: unable to go below four devices on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    btrfs_super_num_devices(&root->fs_info->super_copy) <= 2) {
		printk("btrfs: unable to go below two devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *cur;
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		list_for_each(cur, devices) {
			tmp = list_entry(cur, struct btrfs_device, dev_list);
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		if (!device) {
			printk("btrfs: no missing devices found to remove\n");
			goto out;
		}
	} else {
		bdev = open_bdev_excl(device_path, 0,
				      root->fs_info->bdev_holder);

		bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);

		disk_super = (struct btrfs_super_block *)bh->b_data;
		if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
			    sizeof(disk_super->magic))) {
			goto error_close;
		}
		if (memcmp(disk_super->fsid, root->fs_info->fsid,
			   BTRFS_FSID_SIZE)) {
			goto error_close;
		}
		devid = le64_to_cpu(disk_super->dev_item.devid);
		device = btrfs_find_device(root, devid, NULL);
	}

	root->fs_info->fs_devices->num_devices--;
	root->fs_info->fs_devices->open_devices--;

	ret = btrfs_shrink_device(device, 0);

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);

	if (bh) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	if (device->bdev) {
		/* one close for the device struct or super_block */
		close_bdev_excl(device->bdev);
	}
	if (bdev) {
		/* one close for us */
		close_bdev_excl(bdev);
	}
	kfree(device->name);
	goto out;

error_close:
	close_bdev_excl(bdev);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
	return ret;
}
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *cur;
	struct list_head *devices;
	u64 total_bytes;
	int ret = 0;

	bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);

	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	mutex_lock(&root->fs_info->volume_mutex);

	trans = btrfs_start_transaction(root, 1);

	devices = &root->fs_info->fs_devices->devices;
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto out_close_bdev;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto out_close_bdev;
	}

	device->barriers = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		ret = -ENOMEM;
		goto out_close_bdev;
	}
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;

	ret = btrfs_add_device(trans, root, device);
	if (ret)
		goto out_close_bdev;

	set_blocksize(device->bdev, 4096);

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);

	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	mutex_unlock(&root->fs_info->volume_mutex);
	return ret;

out_close_bdev:
	close_bdev_excl(bdev);
	return ret;
}
int noinline btrfs_update_device(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
	return ret;
}
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	return btrfs_update_device(trans, device);
}
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;

	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	return ret;
}
int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur = 0;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		}

		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
);
int btrfs_relocate_chunk(struct btrfs_root *root,
			 u64 chunk_tree, u64 chunk_objectid,
			 u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	printk("btrfs relocating chunk %llu\n",
	       (unsigned long long)chunk_offset);
	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);

	trans = btrfs_start_transaction(root, 1);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);

	spin_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return ret;
}
static u64 div_factor(u64 num, int factor)
{
	num *= factor;
	do_div(num, 10);
	return num;
}
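/*
 * Balance works in two steps: first every device is shrunk slightly and
 * grown back to its old size, which forces new allocations off the front
 * of each device; then the chunk tree is walked backwards and every chunk
 * except chunk zero is relocated through btrfs_relocate_chunk().
 */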
int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *cur;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one make some room on all the devices */
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);

		trans = btrfs_start_transaction(dev_root, 1);

		ret = btrfs_grow_device(trans, device, old_size);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;

		chunk = btrfs_item_ptr(path->nodes[0],
				       path->slots[0],
				       struct btrfs_chunk);
		key.offset = found_key.offset;
		/* chunk zero is special */
		if (key.offset == 0)
			break;

		btrfs_release_path(chunk_root, path);
		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
	}

	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->volume_mutex);
	return ret;
}
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = device->total_bytes - new_size;

	path = btrfs_alloc_path();

	trans = btrfs_start_transaction(root, 1);

	device->total_bytes = new_size;
	ret = btrfs_update_device(trans, device);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);

	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret)
			break;

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size)
			break;

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
	}

	btrfs_free_path(path);
	return ret;
}
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
static u64 noinline chunk_bytes_by_type(u64 type, u64 calc_size,
					int num_stripes, int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}
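/*
 * Illustrative example of the helper above: with calc_size of 1GB per
 * stripe, RAID1 or DUP still maps to 1GB of logical chunk bytes (the
 * copies are redundant), RAID0 with 4 stripes maps to 4GB, and RAID10
 * with num_stripes = 4 and sub_stripes = 2 maps to 2GB.
 */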
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list;
	struct list_head *cur;
	struct extent_map_tree *em_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 physical;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 max_chunk_size = calc_size;
	u64 min_free;
	u64 avail;
	u64 max_avail = 0;
	u64 percent_max;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}
	dev_list = &extent_root->fs_info->fs_devices->alloc_list;
	if (list_empty(dev_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = extent_root->fs_info->fs_devices->open_devices;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2,
			    extent_root->fs_info->fs_devices->open_devices);
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = extent_root->fs_info->fs_devices->open_devices;
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 4 * calc_size;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	path = btrfs_alloc_path();

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (calc_size * num_stripes > max_chunk_size) {
		calc_size = max_chunk_size;
		do_div(calc_size, num_stripes);
		do_div(calc_size, stripe_len);
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, min_stripe_size, calc_size);

	do_div(calc_size, stripe_len);
	calc_size *= stripe_len;

	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/*
	 * we add 1MB because we never use the first 1MB of the device, unless
	 * we've looped, then we are likely allocating the maximum amount of
	 * space left already
	 */
	if (!looped)
		min_free += 1024 * 1024;

	/* build a private list of devices we will allocate from */
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		if (device->total_bytes > device->bytes_used)
			avail = device->total_bytes - device->bytes_used;
		else
			avail = 0;
		cur = cur->next;

		if (device->in_fs_metadata && avail >= min_free) {
			u64 ignored_start = 0;
			ret = find_free_dev_extent(trans, device, path,
						   min_free,
						   &ignored_start);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP)
					index++;
			}
		} else if (device->in_fs_metadata && avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		btrfs_free_path(path);
		return -ENOSPC;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		btrfs_free_path(path);
		return -ENOMEM;
	}
	btrfs_free_path(path);
	path = NULL;

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);

	index = 0;
	while (index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_alloc_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));

	*start = key.offset;

	em = alloc_extent_map(GFP_NOFS);

	em->bdev = (struct block_device *)map;
	em->start = key.offset;
	em->len = *num_bytes;
	em->block_start = 0;
	em->block_len = em->len;

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key,
				    chunk, btrfs_chunk_item_size(num_stripes));
	}
	kfree(chunk);

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);

	free_extent_map(em);
	return ret;
}
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	spin_unlock(&em_tree->lock);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;

	free_extent_map(em);
	return ret;
}
static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;

	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}
	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}
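/*
 * Worked example for the mapping done below (illustrative numbers only):
 * with a 64k stripe_len and a plain 3-device RAID0 chunk, logical offset
 * 200k gives stripe_nr = 200k / 64k = 3 and stripe_offset = 8k.  The
 * second do_div then yields device index 3 % 3 = 0 and stripe_nr = 1, so
 * the IO lands 1 * 64k + 8k = 72k into device 0's extent for that chunk.
 */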
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_multi_bio **multi_ret,
			     int mirror_num, struct page *unplug_page)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	int num_stripes;
	int max_errors = 0;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (1 << BIO_RW))) {
		stripes_allocated = 1;
	}
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;

		atomic_set(&multi->error, 0);
	}

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	spin_unlock(&em_tree->lock);

	if (!em && unplug_page)
		return 0;

	if (!em) {
		printk("unable to find logical %Lu len %Lu\n", logical, *length);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & (1 << BIO_RW)) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
			max_errors = 1;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
			max_errors = 1;
		}
	}
	if (multi_ret && rw == WRITE &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe*/
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!multi_ret && !unplug_page)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else
			stripe_index = find_live_mirror(map, 0,
					map->num_stripes,
					current->pid % map->num_stripes);

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (1 << BIO_RW))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else
			stripe_index = find_live_mirror(map, stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes);
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < num_stripes; i++) {
		if (unplug_page) {
			struct btrfs_device *device;
			struct backing_dev_info *bdi;

			device = map->stripes[stripe_index].dev;
			if (device->bdev) {
				bdi = blk_get_backing_dev_info(device->bdev);
				if (bdi->unplug_io_fn) {
					bdi->unplug_io_fn(bdi, unplug_page);
				}
			}
		} else {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;
		}
		stripe_index++;
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
		multi->max_errors = max_errors;
	}
out:
	free_extent_map(em);
	return 0;
}
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}
int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
		      u64 logical, struct page *page)
{
	u64 length = PAGE_CACHE_SIZE;
	return __btrfs_map_block(map_tree, READ, logical, &length,
				 NULL, 0, page);
}
static void end_bio_multi_stripe(struct bio *bio, int err)
{
	struct btrfs_multi_bio *multi = bio->bi_private;
	int is_orig_bio = 0;

	if (err)
		atomic_inc(&multi->error);

	if (bio == multi->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		bio = multi->orig_bio;

		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&multi->error) > multi->max_errors) {
			err = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(multi);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
struct async_sched {
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};
/*
 * see run_scheduled_bios for a description of why bios are collected for
 * the same device.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static int noinline schedule_bio(struct btrfs_root *root,
				 struct btrfs_device *device,
				 int rw, struct bio *bio)
{
	int should_queue = 1;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & (1 << BIO_RW))) {
		submit_bio(rw, bio);
		return 0;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;

	spin_lock(&device->io_lock);

	if (device->pending_bio_tail)
		device->pending_bio_tail->bi_next = bio;

	device->pending_bio_tail = bio;
	if (!device->pending_bios)
		device->pending_bios = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;
}
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk("mapping failed logical %Lu bio len %Lu "
		       "len %Lu\n", logical, length, map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	multi->orig_bio = first_bio;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		if (dev && dev->bdev) {
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
			bio_endio(bio, -EIO);
		}
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}
2249 struct btrfs_device
*btrfs_find_device(struct btrfs_root
*root
, u64 devid
,
2252 struct list_head
*head
= &root
->fs_info
->fs_devices
->devices
;
2254 return __find_device(head
, devid
, uuid
);
2257 static struct btrfs_device
*add_missing_dev(struct btrfs_root
*root
,
2258 u64 devid
, u8
*dev_uuid
)
2260 struct btrfs_device
*device
;
2261 struct btrfs_fs_devices
*fs_devices
= root
->fs_info
->fs_devices
;
2263 device
= kzalloc(sizeof(*device
), GFP_NOFS
);
2264 list_add(&device
->dev_list
,
2265 &fs_devices
->devices
);
2266 list_add(&device
->dev_alloc_list
,
2267 &fs_devices
->alloc_list
);
2268 device
->barriers
= 1;
2269 device
->dev_root
= root
->fs_info
->dev_root
;
2270 device
->devid
= devid
;
2271 device
->work
.func
= pending_bios_fn
;
2272 fs_devices
->num_devices
++;
2273 spin_lock_init(&device
->io_lock
);
2274 memcpy(device
->uuid
, dev_uuid
, BTRFS_UUID_SIZE
);
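/*
 * Turn one chunk item into an in-memory extent_map whose bdev field
 * points at a map_lookup describing the stripes.  Missing devices are
 * only tolerated when mounted with the DEGRADED option, in which case
 * placeholder devices are created via add_missing_dev().
 */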
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	spin_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	map = kzalloc(sizeof(*map), GFP_NOFS);

	em = alloc_extent_map(GFP_NOFS);

	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid);

		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	spin_unlock(&map_tree->map_tree.lock);

	free_extent_map(em);
	return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	device = btrfs_find_device(root, devid, dev_uuid);
	if (!device) {
		printk("warning devid %Lu missing\n", devid);
		device = add_missing_dev(root, devid, dev_uuid);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;

	ret = btrfs_open_device(device);

	return ret;
}
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);

	btrfs_set_buffer_uptodate(sb);
	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key); ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}

	btrfs_free_path(path);
	return ret;
}
);