/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/version.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

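/*
 * map_lookup ends in a flexible array member, so the lookup for an
 * n-stripe chunk is carved out of a single allocation of
 * map_lookup_size(n) bytes, e.g. kmalloc(map_lookup_size(num_stripes))
 * in __btrfs_alloc_chunk() below.
 */
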
static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run = 0;
	unsigned long limit;

	bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	pending = device->pending_bios;
	tail = device->pending_bio_tail;
	WARN_ON(pending && !tail);
	device->pending_bios = NULL;
	device->pending_bio_tail = NULL;

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (pending) {
		again = 1;
		device->running_pending = 1;
	} else {
		again = 0;
		device->running_pending = 0;
	}
	spin_unlock(&device->io_lock);

	while (pending) {
		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
		bio_get(cur);
		submit_bio(cur->bi_rw, cur);
		bio_put(cur);
		num_run++;

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) &&
		    fs_info->fs_devices->open_devices > 1) {
			struct bio *old_head;

			spin_lock(&device->io_lock);

			old_head = device->pending_bios;
			device->pending_bios = pending;
			if (device->pending_bio_tail)
				tail->bi_next = old_head;
			else
				device->pending_bio_tail = tail;

			device->running_pending = 0;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
	}
	if (again)
		goto loop;
done:
	return 0;
}

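/*
 * btrfs_work callback: device->work.func is pointed at pending_bios_fn
 * when a device is set up, so the async-thread worker pool drains each
 * device's pending bio list through run_scheduled_bios() above, and
 * btrfs_requeue_work() reschedules it when the bdi is congested.
 */
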
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);
		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

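/*
 * Note that device_list_add() only updates the in-memory registry; the
 * copy of a device whose superblock carries the highest generation wins
 * latest_devid/latest_trans, which __btrfs_open_devices() later uses to
 * pick latest_bdev.
 */
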
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name)
			goto error;

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *tmp;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
again:
	list_for_each_safe(cur, tmp, &fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->in_fs_metadata)
			continue;

		if (device->bdev) {
			close_bdev_exclusive(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	mutex_unlock(&uuid_mutex);
	return 0;
}

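/*
 * The goto above makes btrfs_close_extra_devices() walk the whole seed
 * chain via fs_devices->seed, dropping any device that was scanned but
 * never referenced by the filesystem metadata.
 */
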
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *cur;
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	list_for_each(cur, &fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev) {
			close_bdev_exclusive(device->bdev, device->mode);
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		device->bdev = NULL;
		device->writeable = 0;
		device->in_fs_metadata = 0;
	}
	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}

static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = open_bdev_exclusive(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = le64_to_cpu(disk_super->dev_item.devid);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		continue;

error_brelse:
		brelse(bh);
error_close:
		close_bdev_exclusive(bdev, FMODE_READ);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

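/*
 * A btrfs_fs_devices ends up marked as seeding only if every device
 * that was opened carried BTRFS_SUPER_FLAG_SEEDING; a single writable
 * non-seed device clears the local 'seeding' flag in the loop above.
 */
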
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_exclusive(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = le64_to_cpu(disk_super->dev_item.devid);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk(KERN_INFO "device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk(KERN_INFO "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	close_bdev_exclusive(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

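/*
 * btrfs_scan_one_device() is the entry point used at scan/mount time
 * for each candidate path: it validates the super block and registers
 * the device with device_list_add() without keeping the bdev open.
 */
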
/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
					 struct btrfs_device *device,
					 u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;
	start_found = 0;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	ret = 0;

error:
	btrfs_free_path(path);
	return ret;
}

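/*
 * On success, *start is either the start of the first hole of at least
 * num_bytes between existing DEV_EXTENT items, or the first free byte
 * after the last extent; allocation never begins below 1MB, so the
 * superblock area stays untouched.
 */
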
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		BUG_ON(ret);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		ret = 0;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return ret;
}

int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
	return ret;
}

static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}

int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	mutex_lock(&root->fs_info->volume_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->rw_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->rw_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *cur;
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		list_for_each(cur, devices) {
			tmp = list_entry(cur, struct btrfs_device, dev_list);
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = open_bdev_exclusive(device_path, FMODE_READ,
					   root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EIO;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = le64_to_cpu(disk_super->dev_item.devid);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		list_del_init(&device->dev_alloc_list);
		root->fs_info->fs_devices->rw_devices--;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_brelse;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_brelse;

	device->in_fs_metadata = 0;
	list_del_init(&device->dev_list);
	device->fs_devices->num_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev) {
		close_bdev_exclusive(device->bdev, device->mode);
		device->bdev = NULL;
		device->fs_devices->open_devices--;
	}

	num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);

	if (device->fs_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == device->fs_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = device->fs_devices->seed;
		device->fs_devices->seed = NULL;
		__btrfs_close_devices(device->fs_devices);
		free_fs_devices(device->fs_devices);
	}

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (device->writeable) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	kfree(device->name);
	kfree(device);
	ret = 0;
	goto out;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		close_bdev_exclusive(bdev, FMODE_READ);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
	return ret;
}

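/*
 * Usage note: passing the literal string "missing" as device_path
 * removes the first device that is recorded in the FS metadata but has
 * no open block device, which is how a failed disk gets dropped.
 */
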
/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	list_splice_init(&fs_devices->devices, &seed_devices->devices);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

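/*
 * After btrfs_prepare_sprout() the original read-only devices live on
 * a cloned btrfs_fs_devices hung off fs_devices->seed, while fs_devices
 * itself carries a freshly generated fsid; btrfs_finish_sprout() below
 * then stamps the expected generation into each seed device item.
 */
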
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(root, path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device);

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *cur;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	mutex_lock(&root->fs_info->volume_mutex);

	devices = &root->fs_info->fs_devices->devices;
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 1);
	lock_chunks(root);

	device->barriers = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(trans, root);
		BUG_ON(ret);
	}

	device->fs_devices = root->fs_info->fs_devices;
	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		BUG_ON(ret);
		ret = btrfs_finish_sprout(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_device(trans, root, device);
	}

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		ret = btrfs_relocate_sys_chunks(root);
		BUG_ON(ret);
	}
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	return ret;
error:
	close_bdev_exclusive(bdev, 0);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	goto out;
}

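/*
 * Adding a device to a seed filesystem is the sprouting path above:
 * the superblock is flipped writable, btrfs_prepare_sprout() assigns
 * the new fsid, init_first_rw_device() bootstraps the first metadata
 * and system chunks, and btrfs_finish_sprout() records the seed
 * generations, all inside one committed transaction.
 */
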
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;

	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}

static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			       chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}

static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	printk(KERN_INFO "btrfs relocating chunk %llu\n",
	       (unsigned long long)chunk_offset);
	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	BUG_ON(ret);

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	spin_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}

static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0);

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(chunk_root, path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			BUG_ON(ret);
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

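/*
 * div_factor() scales by tenths: div_factor(n, 1) is n / 10.  That is
 * how btrfs_balance() below shaves up to 10% (capped at 1MB) off each
 * device, and how __btrfs_alloc_chunk() caps a chunk at 10% of the
 * total writable space.
 */
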
int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *cur;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one make some room on all the devices */
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 1);
		BUG_ON(!trans);

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;

		chunk = btrfs_item_ptr(path->nodes[0],
				       path->slots[0],
				       struct btrfs_chunk);
		key.offset = found_key.offset;
		/* chunk zero is special */
		if (key.offset == 0)
			break;

		btrfs_release_path(chunk_root, path);
		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		BUG_ON(ret);
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->volume_mutex);
	return ret;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = device->total_bytes - new_size;

	if (new_size >= device->total_bytes)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto done;
	}

	path->reada = 2;

	lock_chunks(root);

	device->total_bytes = new_size;
	if (device->writeable)
		device->fs_devices->total_rw_bytes -= diff;
	ret = btrfs_update_device(trans, device);
	if (ret) {
		unlock_chunks(root);
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);

	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			goto done;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid)
			goto done;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size)
			goto done;

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret)
			goto done;
	}

done:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
					int num_stripes, int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}

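/*
 * Worked example: with calc_size = 1GB, a four-stripe RAID10 chunk
 * (sub_stripes == 2) maps 1GB * (4 / 2) = 2GB of logical bytes,
 * RAID1/DUP always map one stripe's worth no matter how many copies
 * are written, and RAID0/single map calc_size * num_stripes.
 */
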
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct map_lookup **map_ret,
			       u64 *num_bytes, u64 *stripe_size,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_device *device = NULL;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct list_head private_devs;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 max_chunk_size = calc_size;
	u64 min_free;
	u64 avail;
	u64 max_avail = 0;
	u64 dev_offset;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		WARN_ON(1);
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}
	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = fs_devices->rw_devices;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2, fs_devices->rw_devices);
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = fs_devices->rw_devices;
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 4 * calc_size;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

again:
	if (!map || map->num_stripes != num_stripes) {
		kfree(map);
		map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
		if (!map)
			return -ENOMEM;
		map->num_stripes = num_stripes;
	}

	if (calc_size * num_stripes > max_chunk_size) {
		calc_size = max_chunk_size;
		do_div(calc_size, num_stripes);
		do_div(calc_size, stripe_len);
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, min_stripe_size, calc_size);

	do_div(calc_size, stripe_len);
	calc_size *= stripe_len;

	cur = fs_devices->alloc_list.next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/*
	 * we add 1MB because we never use the first 1MB of the device, unless
	 * we've looped, then we are likely allocating the maximum amount of
	 * space left already
	 */
	if (!looped)
		min_free += 1024 * 1024;

	INIT_LIST_HEAD(&private_devs);
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
		BUG_ON(!device->writeable);
		if (device->total_bytes > device->bytes_used)
			avail = device->total_bytes - device->bytes_used;
		else
			avail = 0;
		cur = cur->next;

		if (device->in_fs_metadata && avail >= min_free) {
			ret = find_free_dev_extent(trans, device,
						   min_free, &dev_offset);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				map->stripes[index].dev = device;
				map->stripes[index].physical = dev_offset;
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP) {
					map->stripes[index].dev = device;
					map->stripes[index].physical =
						dev_offset + calc_size;
					index++;
				}
			}
		} else if (device->in_fs_metadata && avail > max_avail)
			max_avail = avail;
		if (cur == &fs_devices->alloc_list)
			break;
	}
	list_splice(&private_devs, &fs_devices->alloc_list);
	if (index < num_stripes) {
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		kfree(map);
		return -ENOSPC;
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	*map_ret = map;
	*stripe_size = calc_size;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		kfree(map);
		return -ENOMEM;
	}
	em->bdev = (struct block_device *)map;
	em->start = start;
	em->len = *num_bytes;
	em->block_start = 0;
	em->block_len = em->len;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, *num_bytes);
	BUG_ON(ret);

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		ret = btrfs_alloc_dev_extent(trans, device,
				info->chunk_root->root_key.objectid,
				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				start, dev_offset, calc_size);
		BUG_ON(ret);
		index++;
	}

	return 0;
}

static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct map_lookup *map, u64 chunk_offset,
				u64 chunk_size, u64 stripe_size)
{
	u64 dev_offset;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
	int index = 0;
	int ret;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		device->bytes_used += stripe_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);
		index++;
	}

	index = 0;
	stripe = &chunk->stripe;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
		index++;
	}

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
					     item_size);
		BUG_ON(ret);
	}
	kfree(chunk);
	return 0;
}

/*
 * Chunk allocation falls into two parts.  The first part does the work
 * that makes the newly allocated chunk usable but does not modify the
 * chunk tree.  The second part does the work that requires modifying
 * the chunk tree.  This division is important for the bootstrap process
 * of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;
	u64 chunk_size;
	u64 stripe_size;
	struct map_lookup *map;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	int ret;

	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &chunk_offset);
	if (ret)
		return ret;

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, type);
	if (ret)
		return ret;

	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);
	return 0;
}

static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 chunk_size;
	u64 sys_chunk_size;
	u64 stripe_size;
	u64 sys_stripe_size;
	u64 alloc_profile;
	struct map_lookup *map;
	struct map_lookup *sys_map;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	ret = find_next_chunk(fs_info->chunk_root,
			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
	BUG_ON(ret);

	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
			(fs_info->metadata_alloc_profile &
			 fs_info->avail_metadata_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, alloc_profile);
	BUG_ON(ret);

	sys_chunk_offset = chunk_offset + chunk_size;

	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
			(fs_info->system_alloc_profile &
			 fs_info->avail_system_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
				  &sys_chunk_size, &sys_stripe_size,
				  sys_chunk_offset, alloc_profile);
	BUG_ON(ret);

	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
	BUG_ON(ret);

	/*
	 * Modifying the chunk tree needs to allocate new blocks from both
	 * the system block group and the metadata block group.  So we can
	 * only do operations that modify the chunk tree after both block
	 * groups have been created.
	 */
	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);

	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
				   sys_chunk_offset, sys_chunk_size,
				   sys_stripe_size);
	BUG_ON(ret);
	return 0;
}

int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	spin_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	map = (struct map_lookup *)em->bdev;
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}
	free_extent_map(em);
	return readonly;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	spin_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}

static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;

	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}
	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}

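/*
 * Worked example for the stripe math below: with stripe_len = 64K and
 * offset = 200K into the chunk, stripe_nr = 200K / 64K = 3 and
 * stripe_offset = 200K - 3 * 64K = 8K, so the IO lands 8K into the
 * fourth stripe.  For RAID0 the device is stripe 3 % num_stripes, and
 * *length is clamped to 64K - 8K = 56K so no bio crosses a stripe
 * boundary.
 */
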

static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_multi_bio **multi_ret,
			     int mirror_num, struct page *unplug_page)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	int num_stripes;
	int max_errors = 0;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (1 << BIO_RW)))
		stripes_allocated = 1;
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;

		atomic_set(&multi->error, 0);
	}

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	spin_unlock(&em_tree->lock);

	if (!em && unplug_page)
		return 0;

	if (!em) {
		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
		       (unsigned long long)logical,
		       (unsigned long long)*length);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & (1 << BIO_RW)) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
			max_errors = 1;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
			max_errors = 1;
		}
	}
	if (multi_ret && rw == WRITE &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}

	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;
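
	/*
	 * Worked example (illustrative, not from the original source): with
	 * map->stripe_len = 65536 (64KiB) and offset = 200704 bytes into
	 * the chunk:
	 *
	 *   stripe_nr     = 200704 / 65536  = 3      (stride 3 full stripes)
	 *   stripe_offset = 3 * 65536       = 196608
	 *   stripe_offset = 200704 - 196608 = 4096
	 *
	 * so the block sits 4KiB into its stripe.  do_div() is used because
	 * plain 64-bit division is not available on 32-bit kernels.
	 */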

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!multi_ret && !unplug_page)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, 0,
					      map->num_stripes,
					      current->pid % map->num_stripes);
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (1 << BIO_RW))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes);
		}
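		/*
		 * Worked example (illustrative, not from the original
		 * source): num_stripes = 4 with sub_stripes = 2 gives
		 * factor = 2 mirror pairs.  For stripe_nr = 5,
		 * do_div(stripe_nr, 2) leaves stripe_nr = 2 and returns
		 * remainder 1, so stripe_index = 1 * 2 = 2: the block lives
		 * on the second pair (stripes 2 and 3), two stripes deep on
		 * each of them.
		 */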
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < num_stripes; i++) {
		if (unplug_page) {
			struct btrfs_device *device;
			struct backing_dev_info *bdi;

			device = map->stripes[stripe_index].dev;
			if (device->bdev) {
				bdi = blk_get_backing_dev_info(device->bdev);
				if (bdi->unplug_io_fn)
					bdi->unplug_io_fn(bdi, unplug_page);
			}
		} else {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;
		}
		stripe_index++;
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
		multi->max_errors = max_errors;
	}
out:
	free_extent_map(em);
	return 0;
}

int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}

int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	int i, j, nr = 0;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(!em || em->start != chunk_start);
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		do_div(length, map->num_stripes);

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
	BUG_ON(!buf);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		do_div(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			do_div(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		bytenr = chunk_start + stripe_nr * map->stripe_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}
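
	/*
	 * Worked example (illustrative, not from the original source): in a
	 * 2-device RAID0 chunk with stripe_len = 65536, a physical address
	 * 196608 bytes into stripe i = 0 reverse-maps as:
	 *
	 *   stripe_nr = 196608 / 65536 = 3
	 *   stripe_nr = 3 * 2 + 0      = 6
	 *   bytenr    = chunk_start + 6 * 65536
	 *
	 * i.e. chunk offset 384KiB: the interleave means each device holds
	 * every other 64KiB slice of the chunk.
	 */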
	/* sanity check the addresses we collected against the mapping */
	for (i = 0; i < nr; i++) {
		struct btrfs_multi_bio *multi;
		struct btrfs_bio_stripe *stripe;
		int ret;

		ret = btrfs_map_block(map_tree, WRITE, buf[i],
				      &length, &multi, 0);
		BUG_ON(ret);

		stripe = multi->stripes;
		for (j = 0; j < multi->num_stripes; j++) {
			if (stripe->physical >= physical &&
			    physical < stripe->physical + length)
				break;
			stripe++;
		}
		BUG_ON(j >= multi->num_stripes);
		kfree(multi);
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = map->stripe_len;

	free_extent_map(em);
	return 0;
}

int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
		      u64 logical, struct page *page)
{
	u64 length = PAGE_CACHE_SIZE;
	return __btrfs_map_block(map_tree, READ, logical, &length,
				 NULL, 0, page);
}

static void end_bio_multi_stripe(struct bio *bio, int err)
{
	struct btrfs_multi_bio *multi = bio->bi_private;
	int is_orig_bio = 0;

	if (err)
		atomic_inc(&multi->error);

	if (bio == multi->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = multi->orig_bio;
		}
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;
		/*
		 * only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&multi->error) > multi->max_errors) {
			err = -EIO;
		} else if (err) {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(multi);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
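
/*
 * Example (illustrative, not from the original source): a RAID1 write
 * fans out to 2 stripes with max_errors = 1.  If exactly one device
 * fails, multi->error reaches 1, which is not beyond max_errors, so the
 * original bio is completed with BIO_UPTODATE set and err forced back to
 * 0; only a second failure surfaces -EIO to the upper layers.
 */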

struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};

/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline int schedule_bio(struct btrfs_root *root,
				 struct btrfs_device *device,
				 int rw, struct bio *bio)
{
	int should_queue = 1;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & (1 << BIO_RW))) {
		bio_get(bio);
		submit_bio(rw, bio);
		bio_put(bio);
		return 0;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);

	if (device->pending_bio_tail)
		device->pending_bio_tail->bi_next = bio;

	device->pending_bio_tail = bio;
	if (!device->pending_bios)
		device->pending_bios = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;
}
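
/*
 * Example (illustrative, not from the original source): three async
 * writes to one device end up chained FIFO through bi_next:
 *
 *   device->pending_bios -> bio1 -> bio2 -> bio3 <- device->pending_bio_tail
 *
 * If the worker is already draining the list (device->running_pending),
 * should_queue is cleared and no extra work item is queued;
 * run_scheduled_bios picks the new bio up on the same pass.
 */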

int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
		       "len %llu\n", (unsigned long long)logical,
		       (unsigned long long)length,
		       (unsigned long long)map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	multi->orig_bio = first_bio;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		BUG_ON(rw == WRITE && !dev->writeable);
		if (dev && dev->bdev) {
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
			bio_endio(bio, -EIO);
		}
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}

struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}
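
/*
 * Example (illustrative, not from the original source): on a filesystem
 * sprouted from a seed device the fs_devices structs are chained through
 * ->seed, e.g.
 *
 *   fs_info->fs_devices (sprout) -> seed fs_devices -> NULL
 *
 * With fsid == NULL the lookup above matches a devid/uuid pair anywhere
 * in the chain; a non-NULL fsid restricts it to one filesystem.
 */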

static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	device->barriers = 1;
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;
	spin_lock_init(&device->io_lock);
	INIT_LIST_HEAD(&device->dev_alloc_list);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
	return device;
}

static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	spin_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	spin_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}
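
/*
 * Note (illustrative, not from the original source): the extent_map has
 * no real block device here; its bdev field is borrowed to stash the
 * map_lookup pointer set above, which is why lookups cast it back with
 * (struct map_lookup *)em->bdev and why btrfs_mapping_tree_free()
 * kfree()s em->bdev before dropping the em.
 */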

static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}

static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	mutex_lock(&uuid_mutex);

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices)) {
		ret = PTR_ERR(fs_devices);
		goto out;
	}

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret)
		goto out;

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}

static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device) {
			printk(KERN_WARNING "warning devid %llu missing\n",
			       (unsigned long long)devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;
	if (device->writeable)
		device->fs_devices->total_rw_bytes += device->total_bytes;
	return 0;
}

int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}

int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}
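
/*
 * Layout sketch (illustrative, not from the original source):
 * sys_chunk_array holds packed (disk_key, chunk) pairs, each chunk
 * trailed by its stripe array:
 *
 *   [btrfs_disk_key][btrfs_chunk + num_stripes stripes][btrfs_disk_key]...
 *
 * which is why the cursor above advances by sizeof(*disk_key) and then
 * by btrfs_chunk_item_size(num_stripes) per entry.
 */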

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				if (ret)
					goto error;
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}