/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <asm/div64.h>
#include "extent_map.h"
#include "transaction.h"
#include "print-tree.h"
#include "async-thread.h"
	struct btrfs_bio_stripe stripes[];	/* flexible array at the end of struct map_lookup */
};
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}
int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct bio *pending;
	struct bio *tail;
	struct bio *cur;
	unsigned long num_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	spin_lock(&device->io_lock);

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	pending = device->pending_bios;
	tail = device->pending_bio_tail;
	WARN_ON(pending && !tail);
	device->pending_bios = NULL;
	device->pending_bio_tail = NULL;

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	device->running_pending = 1;

	device->running_pending = 0;

	spin_unlock(&device->io_lock);

	pending = pending->bi_next;

	atomic_dec(&fs_info->nr_async_bios);

	if (atomic_read(&fs_info->nr_async_bios) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	BUG_ON(atomic_read(&cur->bi_cnt) == 0);

	submit_bio(cur->bi_rw, cur);

	/*
	 * we made progress, there is more work to do and the bdi
	 * is now congested.  Back off and let other work structs
	 * run instead
	 */
	if (pending && bdi_write_congested(bdi) && num_run > 16 &&
	    fs_info->fs_devices->open_devices > 1) {
		struct bio *old_head;
		struct io_context *ioc;

		ioc = current->io_context;

		/*
		 * the main goal here is that we don't want to
		 * block if we're going to be able to submit
		 * more requests without blocking.
		 *
		 * This code does two great things, it pokes into
		 * the elevator code from a filesystem _and_
		 * it makes assumptions about how batching works.
		 */
		if (ioc && ioc->nr_batch_requests > 0 &&
		    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
		    (last_waited == 0 ||
		     ioc->last_waited == last_waited)) {
			/*
			 * we want to go through our batch of
			 * requests and stop.  So, we copy out
			 * the ioc->last_waited time and test
			 * against it before looping
			 */
			last_waited = ioc->last_waited;
		}
		spin_lock(&device->io_lock);

		old_head = device->pending_bios;
		device->pending_bios = pending;
		if (device->pending_bio_tail)
			tail->bi_next = old_head;

		device->pending_bio_tail = tail;

		device->running_pending = 1;

		spin_unlock(&device->io_lock);
		btrfs_requeue_work(&device->work);
	}

	spin_lock(&device->io_lock);
	if (device->pending_bios)

	spin_unlock(&device->io_lock);

	/*
	 * IO has already been through a long path to get here.  Checksumming,
	 * async helper threads, perhaps compression.  We've done a pretty
	 * good job of collecting a batch of IO and should just unplug
	 * the device right away.
	 *
	 * This will help anyone who is waiting on the IO, they might have
	 * already unplugged, but managed to do so before the bio they
	 * cared about found its way down here.
	 */
	blk_run_backing_dev(bdi, NULL);
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);
		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	mutex_lock(&uuid_mutex);

	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata)

		close_bdev_exclusive(device->bdev, device->mode);

		fs_devices->open_devices--;

		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
	}

	mutex_unlock(&uuid_mutex);
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		close_bdev_exclusive(device->bdev, device->mode);
		fs_devices->open_devices--;

		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		device->writeable = 0;
		device->in_fs_metadata = 0;
	}
	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;

	list_for_each_entry(device, head, dev_list) {
		bdev = open_bdev_exclusive(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = le64_to_cpu(disk_super->dev_item.devid);
		if (devid != device->devid)

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		}
		device->writeable = !bdev_read_only(bdev);

		device->in_fs_metadata = 0;
		device->mode = flags;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}

		close_bdev_exclusive(bdev, FMODE_READ);
	}
	if (fs_devices->open_devices == 0) {
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_exclusive(path, flags, holder);

	ret = set_blocksize(bdev, 4096);

	bh = btrfs_read_dev_super(bdev);

	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = le64_to_cpu(disk_super->dev_item.devid);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);

	/* FIXME, make a real uuid parser */
	printk(KERN_INFO "device fsid %llx-%llx ",
	       *(unsigned long long *)disk_super->fsid,
	       *(unsigned long long *)(disk_super->fsid + 8));

	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	close_bdev_exclusive(bdev, flags);

	mutex_unlock(&uuid_mutex);
/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
					 struct btrfs_device *device,
					 u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	struct extent_buffer *l;

	path = btrfs_alloc_path();

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);

	ret = btrfs_previous_item(root, path, 0, key.type);

	btrfs_item_key_to_cpu(l, &key, path->slots[0]);

	slot = path->slots[0];
	if (slot >= btrfs_header_nritems(l)) {
		ret = btrfs_next_leaf(root, path);
	}
	if (search_start >= search_end) {
	}
	*start = search_start;

	*start = last_byte > search_start ?
		last_byte : search_start;
	if (search_end <= *start) {
	}

	btrfs_item_key_to_cpu(l, &key, slot);

	if (key.objectid < device->devid)

	if (key.objectid > device->devid)

	if (key.offset >= search_start && key.offset > last_byte &&

	if (last_byte < search_start)
		last_byte = search_start;
	hole_size = key.offset - last_byte;
	if (key.offset > last_byte &&
	    hole_size >= num_bytes) {
	}

	if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)

	dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
	last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);

	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
	}
	/* check for pending inserts here */

	btrfs_free_path(path);
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();

	key.objectid = device->devid;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

	ret = btrfs_previous_item(root, path, key.objectid,
				  BTRFS_DEV_EXTENT_KEY);

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	BUG_ON(found_key.offset > start || found_key.offset +
	       btrfs_dev_extent_length(leaf, extent) < start);

	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();

	key.objectid = device->devid;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);

	btrfs_item_key_to_cpu(path->nodes[0], &found_key,
			      path->slots[0]);
	if (found_key.objectid != objectid)

	chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
			       struct btrfs_chunk);
	*offset = found_key.offset +
		btrfs_chunk_length(path->nodes[0], chunk);

	btrfs_free_path(path);
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);

	btrfs_item_key_to_cpu(path->nodes[0], &found_key,
			      path->slots[0]);
	*objectid = found_key.offset + 1;

	btrfs_free_path(path);
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	trans = btrfs_start_transaction(root, 1);
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;

	mutex_lock(&uuid_mutex);
	mutex_lock(&root->fs_info->volume_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->rw_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->rw_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		devices = &root->fs_info->fs_devices->devices;
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
			}
		}
		printk(KERN_ERR "btrfs: no missing devices found to "
		       "remove\n");
	}

	bdev = open_bdev_exclusive(device_path, FMODE_READ,
				   root->fs_info->bdev_holder);

	ret = PTR_ERR(bdev);

	set_blocksize(bdev, 4096);
	bh = btrfs_read_dev_super(bdev);

	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = le64_to_cpu(disk_super->dev_item.devid);
	dev_uuid = disk_super->dev_item.uuid;
	device = btrfs_find_device(root, devid, dev_uuid,
				   disk_super->fsid);

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
	}

	if (device->writeable) {
		list_del_init(&device->dev_alloc_list);
		root->fs_info->fs_devices->rw_devices--;
	}

	ret = btrfs_shrink_device(device, 0);

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);

	device->in_fs_metadata = 0;
	list_del_init(&device->dev_list);
	device->fs_devices->num_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	close_bdev_exclusive(device->bdev, device->mode);
	device->bdev = NULL;
	device->fs_devices->open_devices--;

	num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);

	if (device->fs_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == device->fs_devices)

			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = device->fs_devices->seed;
		device->fs_devices->seed = NULL;
		__btrfs_close_devices(device->fs_devices);
		free_fs_devices(device->fs_devices);
	}

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (device->writeable) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	kfree(device->name);

	close_bdev_exclusive(bdev, FMODE_READ);

	mutex_unlock(&root->fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
	struct btrfs_device *device;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	list_splice_init(&fs_devices->devices, &seed_devices->devices);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	path = btrfs_alloc_path();

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	leaf = path->nodes[0];

	if (path->slots[0] >= btrfs_header_nritems(leaf)) {
		ret = btrfs_next_leaf(root, path);

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(root, path);
	}

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
	    key.type != BTRFS_DEV_ITEM_KEY)

	dev_item = btrfs_item_ptr(leaf, path->slots[0],
				  struct btrfs_dev_item);
	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);
	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);

	if (device->fs_devices->seeding) {
		btrfs_set_device_generation(leaf, dev_item,
					    device->generation);
		btrfs_mark_buffer_dirty(leaf);
	}

	btrfs_free_path(path);
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	int seeding_dev = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)

	bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);

	if (root->fs_info->fs_devices->seeding) {
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	mutex_lock(&root->fs_info->volume_mutex);

	devices = &root->fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	/* we can safely leave the fs_devices entry around */

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
	}

	ret = find_next_devid(root, &device->devid);

	trans = btrfs_start_transaction(root, 1);

	device->barriers = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;

	set_blocksize(device->bdev, 4096);

	sb->s_flags &= ~MS_RDONLY;
	ret = btrfs_prepare_sprout(trans, root);

	device->fs_devices = root->fs_info->fs_devices;
	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);

	ret = init_first_rw_device(trans, root, device);

	ret = btrfs_finish_sprout(trans, root);

	ret = btrfs_add_device(trans, root, device);

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	mutex_unlock(&uuid_mutex);
	up_write(&sb->s_umount);

	ret = btrfs_relocate_sys_chunks(root);

	mutex_unlock(&root->fs_info->volume_mutex);

	close_bdev_exclusive(bdev, 0);

	mutex_unlock(&uuid_mutex);
	up_write(&sb->s_umount);
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)

	if (new_size <= device->total_bytes)

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;

	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
			       u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		}

		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));

			btrfs_set_super_sys_array_size(super_copy, array_size);
		}
	}
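
/*
 * Illustration (added for clarity, not part of the original source): the
 * sys_chunk_array that btrfs_del_sys_chunk() walks above is a packed
 * sequence of records, each laid out as
 *
 *   [struct btrfs_disk_key][struct btrfs_chunk + num_stripes stripes]
 *
 * so every record spans sizeof(struct btrfs_disk_key) +
 * btrfs_chunk_item_size(num_stripes) bytes.  That is exactly the "len"
 * the loop advances by, and the span the memmove() collapses when the
 * matching chunk is removed.
 */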
static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;

	printk(KERN_INFO "btrfs relocating chunk %llu\n",
	       (unsigned long long)chunk_offset);
	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);

	trans = btrfs_start_transaction(root, 1);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);

	spin_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);

	/* once for the tree */
	free_extent_map(em);

	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;

	path = btrfs_alloc_path();

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);

	ret = btrfs_previous_item(chunk_root, path, key.objectid,
				  key.type);

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	chunk = btrfs_item_ptr(leaf, path->slots[0],
			       struct btrfs_chunk);
	chunk_type = btrfs_chunk_type(leaf, chunk);
	btrfs_release_path(chunk_root, path);

	if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
					   found_key.objectid,
					   found_key.offset);
	}

	if (found_key.offset == 0)

	key.offset = found_key.offset - 1;

	btrfs_free_path(path);
static u64 div_factor(u64 num, int factor)

int btrfs_balance(struct btrfs_root *dev_root)
{
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one make some room on all the devices */
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)

		ret = btrfs_shrink_device(device, old_size - size_to_free);

		trans = btrfs_start_transaction(dev_root, 1);

		ret = btrfs_grow_device(trans, device, old_size);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);

	/*
	 * this shouldn't happen, it means the last relocate
	 * failed
	 */
	ret = btrfs_previous_item(chunk_root, path, 0,
				  BTRFS_CHUNK_ITEM_KEY);

	btrfs_item_key_to_cpu(path->nodes[0], &found_key,
			      path->slots[0]);
	if (found_key.objectid != key.objectid)

	chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
			       struct btrfs_chunk);
	key.offset = found_key.offset;
	/* chunk zero is special */
	if (key.offset == 0)

	btrfs_release_path(chunk_root, path);
	ret = btrfs_relocate_chunk(chunk_root,
				   chunk_root->root_key.objectid,
				   found_key.objectid,
				   found_key.offset);

	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->volume_mutex);
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = device->total_bytes - new_size;

	if (new_size >= device->total_bytes)

	path = btrfs_alloc_path();

	trans = btrfs_start_transaction(root, 1);

	device->total_bytes = new_size;
	if (device->writeable)
		device->fs_devices->total_rw_bytes -= diff;
	ret = btrfs_update_device(trans, device);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);

	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	ret = btrfs_previous_item(root, path, 0, key.type);

	slot = path->slots[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);

	if (key.objectid != device->devid)

	dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
	length = btrfs_dev_extent_length(l, dev_extent);

	if (key.offset + length <= new_size)

	chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
	chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
	chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
	btrfs_release_path(root, path);

	ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
				   chunk_offset);

	btrfs_free_path(path);
static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);

static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
					int num_stripes, int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}

static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct map_lookup **map_ret,
			       u64 *num_bytes, u64 *stripe_size,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_device *device = NULL;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct list_head private_devs;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 max_chunk_size = calc_size;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int stripe_len = 64 * 1024;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}
	if (list_empty(&fs_devices->alloc_list))

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = fs_devices->rw_devices;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2, fs_devices->rw_devices);
		if (num_stripes < 2)
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = fs_devices->rw_devices;
		if (num_stripes < 4)
		num_stripes &= ~(u32)1;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 4 * calc_size;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	if (!map || map->num_stripes != num_stripes) {
		map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
		map->num_stripes = num_stripes;
	}

	if (calc_size * num_stripes > max_chunk_size) {
		calc_size = max_chunk_size;
		do_div(calc_size, num_stripes);
		do_div(calc_size, stripe_len);
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, min_stripe_size, calc_size);

	do_div(calc_size, stripe_len);
	calc_size *= stripe_len;

	cur = fs_devices->alloc_list.next;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/*
	 * we add 1MB because we never use the first 1MB of the device, unless
	 * we've looped, then we are likely allocating the maximum amount of
	 * space left already
	 */
	min_free += 1024 * 1024;

	INIT_LIST_HEAD(&private_devs);
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
		BUG_ON(!device->writeable);
		if (device->total_bytes > device->bytes_used)
			avail = device->total_bytes - device->bytes_used;

		if (device->in_fs_metadata && avail >= min_free) {
			ret = find_free_dev_extent(trans, device,
						   min_free, &dev_offset);
			list_move_tail(&device->dev_alloc_list,
				       &private_devs);
			map->stripes[index].dev = device;
			map->stripes[index].physical = dev_offset;

			if (type & BTRFS_BLOCK_GROUP_DUP) {
				map->stripes[index].dev = device;
				map->stripes[index].physical =
					dev_offset + calc_size;
			}
		} else if (device->in_fs_metadata && avail > max_avail)

		if (cur == &fs_devices->alloc_list)
	}
	list_splice(&private_devs, &fs_devices->alloc_list);
	if (index < num_stripes) {
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
		}
		if (!looped && max_avail > 0) {
			calc_size = max_avail;
		}
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	*stripe_size = calc_size;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);

	em = alloc_extent_map(GFP_NOFS);

	em->bdev = (struct block_device *)map;
	em->len = *num_bytes;
	em->block_start = 0;
	em->block_len = em->len;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);

	free_extent_map(em);

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, *num_bytes);

	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		ret = btrfs_alloc_dev_extent(trans, device,
				info->chunk_root->root_key.objectid,
				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				start, dev_offset, calc_size);
	}
static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct map_lookup *map, u64 chunk_offset,
				u64 chunk_size, u64 stripe_size)
{
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	size_t item_size = btrfs_chunk_item_size(map->num_stripes);

	chunk = kzalloc(item_size, GFP_NOFS);

	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		device->bytes_used += stripe_size;
		ret = btrfs_update_device(trans, device);
	}

	stripe = &chunk->stripe;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
	}

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
					     item_size);
	}
/*
 * Chunk allocation falls into two parts.  The first part does the work
 * that makes the newly allocated chunk usable, but does not do any
 * operation that modifies the chunk tree.  The second part does the work
 * that requires modifying the chunk tree.  This division is important for
 * the bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	struct map_lookup *map;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;

	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &chunk_offset);

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, type);

	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 sys_chunk_offset;
	u64 sys_stripe_size;
	struct map_lookup *map;
	struct map_lookup *sys_map;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;

	ret = find_next_chunk(fs_info->chunk_root,
			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);

	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
			(fs_info->metadata_alloc_profile &
			 fs_info->avail_metadata_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, alloc_profile);

	sys_chunk_offset = chunk_offset + chunk_size;

	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
			(fs_info->system_alloc_profile &
			 fs_info->avail_system_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
				  &sys_chunk_size, &sys_stripe_size,
				  sys_chunk_offset, alloc_profile);

	ret = btrfs_add_device(trans, fs_info->chunk_root, device);

	/*
	 * Modifying the chunk tree needs to allocate new blocks from both
	 * the system block group and the metadata block group, so we can
	 * only do operations that modify the chunk tree after both block
	 * groups have been created.
	 */
	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);

	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
				   sys_chunk_offset, sys_chunk_size,
				   sys_stripe_size);
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;

	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	spin_unlock(&map_tree->map_tree.lock);

	map = (struct map_lookup *)em->bdev;
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
		}
	}
	free_extent_map(em);
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	spin_lock(&tree->map_tree.lock);
	em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
	remove_extent_mapping(&tree->map_tree, em);
	spin_unlock(&tree->map_tree.lock);

	free_extent_map(em);
	/* once for the tree */
	free_extent_map(em);
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	spin_unlock(&em_tree->lock);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;

	free_extent_map(em);
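
/*
 * Example (added for clarity, not part of the original source): for a
 * two-device RAID1 chunk btrfs_num_copies() reports map->num_stripes (2),
 * for a RAID10 chunk it reports map->sub_stripes (normally 2), and for
 * RAID0 or single chunks it falls back to a single copy.
 */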
static int find_live_mirror(struct map_lookup *map, int first, int num,
                            int optimal)
{
        int i;

        if (map->stripes[optimal].dev->bdev)
                return optimal;
        for (i = first; i < first + num; i++) {
                if (map->stripes[i].dev->bdev)
                        return i;
        }

        /* we couldn't find one that doesn't fail.  Just return something
         * and the io error handling code will clean up eventually
         */
        return optimal;
}
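
/*
 * __btrfs_map_block() is the core logical to physical translation.  Given a
 * logical byte range it finds the chunk mapping, trims *length so a bio
 * never crosses a stripe boundary, and, for callers that pass @multi_ret,
 * fills a btrfs_multi_bio with one physical stripe per copy that has to be
 * written (or with the single mirror chosen for a read).  When @unplug_page
 * is set the call is only used to kick the backing devices' unplug
 * functions.
 */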
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
                             u64 logical, u64 *length,
                             struct btrfs_multi_bio **multi_ret,
                             int mirror_num, struct page *unplug_page)
{
        struct extent_map *em;
        struct map_lookup *map;
        struct extent_map_tree *em_tree = &map_tree->map_tree;
        u64 offset;
        u64 stripe_offset;
        u64 stripe_nr;
        int stripes_allocated = 8;
        int stripes_required = 1;
        int stripe_index;
        int i;
        int num_stripes;
        int max_errors = 0;
        struct btrfs_multi_bio *multi = NULL;

        if (multi_ret && !(rw & (1 << BIO_RW)))
                stripes_allocated = 1;
again:
        if (multi_ret) {
                multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
                                GFP_NOFS);
                if (!multi)
                        return -ENOMEM;

                atomic_set(&multi->error, 0);
        }

        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, logical, *length);
        spin_unlock(&em_tree->lock);

        if (!em && unplug_page)
                return 0;

        if (!em) {
                printk(KERN_CRIT "unable to find logical %llu len %llu\n",
                       (unsigned long long)logical,
                       (unsigned long long)*length);
                BUG();
        }

        BUG_ON(em->start > logical || em->start + em->len < logical);
        map = (struct map_lookup *)em->bdev;
        offset = logical - em->start;

        if (mirror_num > map->num_stripes)
                mirror_num = 0;

        /* if our multi bio struct is too small, back off and try again */
        if (rw & (1 << BIO_RW)) {
                if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
                                 BTRFS_BLOCK_GROUP_DUP)) {
                        stripes_required = map->num_stripes;
                        max_errors = 1;
                } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
                        stripes_required = map->sub_stripes;
                        max_errors = 1;
                }
        }
        if (multi_ret && rw == WRITE &&
            stripes_allocated < stripes_required) {
                stripes_allocated = map->num_stripes;
                free_extent_map(em);
                kfree(multi);
                goto again;
        }

        stripe_nr = offset;
        /*
         * stripe_nr counts the total number of stripes we have to stride
         * to get to this block
         */
        do_div(stripe_nr, map->stripe_len);

        stripe_offset = stripe_nr * map->stripe_len;
        BUG_ON(offset < stripe_offset);

        /* stripe_offset is the offset of this block in its stripe */
        stripe_offset = offset - stripe_offset;
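
        /*
         * A worked example of the math above, with made-up numbers: for a
         * stripe_len of 64K and an offset of 200K into the chunk, the
         * do_div leaves stripe_nr = 3 (three full stripes are strided
         * over), and the subtraction gives stripe_offset = 200K - 3 * 64K
         * = 8K, i.e. the requested byte starts 8K into the fourth stripe.
         * The real stripe_len comes from the chunk item on disk.
         */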
        if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
                         BTRFS_BLOCK_GROUP_RAID10 |
                         BTRFS_BLOCK_GROUP_DUP)) {
                /* we limit the length of each bio to what fits in a stripe */
                *length = min_t(u64, em->len - offset,
                                map->stripe_len - stripe_offset);
        } else {
                *length = em->len - offset;
        }

        if (!multi_ret && !unplug_page)
                goto out;

        num_stripes = 1;
        stripe_index = 0;
        if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
                if (unplug_page || (rw & (1 << BIO_RW)))
                        num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;
                else {
                        stripe_index = find_live_mirror(map, 0,
                                            map->num_stripes,
                                            current->pid % map->num_stripes);
                }

        } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
                if (rw & (1 << BIO_RW))
                        num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;

        } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
                int factor = map->num_stripes / map->sub_stripes;

                stripe_index = do_div(stripe_nr, factor);
                stripe_index *= map->sub_stripes;

                if (unplug_page || (rw & (1 << BIO_RW)))
                        num_stripes = map->sub_stripes;
                else if (mirror_num)
                        stripe_index += mirror_num - 1;
                else {
                        stripe_index = find_live_mirror(map, stripe_index,
                                              map->sub_stripes, stripe_index +
                                              current->pid % map->sub_stripes);
                }
        } else {
                /*
                 * after this do_div call, stripe_nr is the number of stripes
                 * on this device we have to walk to find the data, and
                 * stripe_index is the number of our device in the stripe array
                 */
                stripe_index = do_div(stripe_nr, map->num_stripes);
        }
        BUG_ON(stripe_index >= map->num_stripes);

        for (i = 0; i < num_stripes; i++) {
                if (unplug_page) {
                        struct btrfs_device *device;
                        struct backing_dev_info *bdi;

                        device = map->stripes[stripe_index].dev;
                        if (device->bdev) {
                                bdi = blk_get_backing_dev_info(device->bdev);
                                if (bdi->unplug_io_fn)
                                        bdi->unplug_io_fn(bdi, unplug_page);
                        }
                } else {
                        multi->stripes[i].physical =
                                map->stripes[stripe_index].physical +
                                stripe_offset + stripe_nr * map->stripe_len;
                        multi->stripes[i].dev = map->stripes[stripe_index].dev;
                }
                stripe_index++;
        }
        if (multi_ret) {
                *multi_ret = multi;
                multi->num_stripes = num_stripes;
                multi->max_errors = max_errors;
        }
out:
        free_extent_map(em);
        return 0;
}
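
/*
 * btrfs_map_block() below is the public wrapper around __btrfs_map_block()
 * for callers that do not need the unplug_page path.  A minimal usage
 * sketch (names are illustrative, error handling omitted), mirroring what
 * btrfs_map_bio() does further down:
 *
 *      struct btrfs_multi_bio *multi = NULL;
 *      u64 map_length = bio->bi_size;
 *
 *      ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi, 0);
 *
 * On return map_length is clamped to what fits in one stripe and
 * multi->stripes[] holds the physical start on each device to submit to.
 */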
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
                    u64 logical, u64 *length,
                    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
        return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
                                 mirror_num, NULL);
}
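
/*
 * btrfs_rmap_block() is the reverse mapping: given a chunk and a physical
 * byte offset on one of its devices (optionally restricted to @devid), it
 * computes every logical address that maps onto that physical stripe and
 * hands them back in a freshly allocated array through @logical / @naddrs.
 */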
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
                     u64 chunk_start, u64 physical, u64 devid,
                     u64 **logical, int *naddrs, int *stripe_len)
{
        struct extent_map_tree *em_tree = &map_tree->map_tree;
        struct extent_map *em;
        struct map_lookup *map;
        u64 *buf;
        u64 bytenr;
        u64 length;
        u64 stripe_nr;
        int i, j, nr = 0;

        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, chunk_start, 1);
        spin_unlock(&em_tree->lock);

        BUG_ON(!em || em->start != chunk_start);
        map = (struct map_lookup *)em->bdev;

        length = em->len;
        if (map->type & BTRFS_BLOCK_GROUP_RAID10)
                do_div(length, map->num_stripes / map->sub_stripes);
        else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
                do_div(length, map->num_stripes);

        buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
        BUG_ON(!buf);

        for (i = 0; i < map->num_stripes; i++) {
                if (devid && map->stripes[i].dev->devid != devid)
                        continue;
                if (map->stripes[i].physical > physical ||
                    map->stripes[i].physical + length <= physical)
                        continue;

                stripe_nr = physical - map->stripes[i].physical;
                do_div(stripe_nr, map->stripe_len);

                if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
                        stripe_nr = stripe_nr * map->num_stripes + i;
                        do_div(stripe_nr, map->sub_stripes);
                } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
                        stripe_nr = stripe_nr * map->num_stripes + i;
                }
                bytenr = chunk_start + stripe_nr * map->stripe_len;
                WARN_ON(nr >= map->num_stripes);
                for (j = 0; j < nr; j++) {
                        if (buf[j] == bytenr)
                                break;
                }
                if (j == nr) {
                        WARN_ON(nr >= map->num_stripes);
                        buf[nr++] = bytenr;
                }
        }

        /* note: with "i > nr" this verification pass never runs as written */
        for (i = 0; i > nr; i++) {
                struct btrfs_multi_bio *multi;
                struct btrfs_bio_stripe *stripe;
                int ret;

                length = 1;
                ret = btrfs_map_block(map_tree, WRITE, buf[i],
                                      &length, &multi, 0);
                BUG_ON(ret);

                stripe = multi->stripes;
                for (j = 0; j < multi->num_stripes; j++) {
                        if (stripe->physical >= physical &&
                            physical < stripe->physical + length)
                                break;
                }
                BUG_ON(j >= multi->num_stripes);
                kfree(multi);
        }

        *logical = buf;
        *naddrs = nr;
        *stripe_len = map->stripe_len;

        free_extent_map(em);
        return 0;
}
int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
                      u64 logical, struct page *page)
{
        u64 length = PAGE_CACHE_SIZE;
        return __btrfs_map_block(map_tree, READ, logical, &length,
                                 NULL, 0, page);
}
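
/*
 * end_bio_multi_stripe() is the completion handler shared by all of the
 * per-device bios cloned for one logical request.  Every failed bio bumps
 * multi->error; only when the last outstanding stripe completes is the
 * original bio's end_io restored and called, and an error is passed up only
 * if more stripes failed than multi->max_errors tolerates.
 */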
static void end_bio_multi_stripe(struct bio *bio, int err)
{
        struct btrfs_multi_bio *multi = bio->bi_private;
        int is_orig_bio = 0;

        if (err)
                atomic_inc(&multi->error);

        if (bio == multi->orig_bio)
                is_orig_bio = 1;

        if (atomic_dec_and_test(&multi->stripes_pending)) {
                if (!is_orig_bio) {
                        bio_put(bio);
                        bio = multi->orig_bio;
                }
                bio->bi_private = multi->private;
                bio->bi_end_io = multi->end_io;
                /* only send an error to the higher layers if it is
                 * beyond the tolerance of the multi-bio
                 */
                if (atomic_read(&multi->error) > multi->max_errors) {
                        err = -EIO;
                } else if (err) {
                        /*
                         * this bio is actually up to date, we didn't
                         * go over the max number of errors
                         */
                        set_bit(BIO_UPTODATE, &bio->bi_flags);
                        err = 0;
                }
                kfree(multi);

                bio_endio(bio, err);
        } else if (!is_orig_bio) {
                bio_put(bio);
        }
}
struct async_sched {
        struct bio *bio;
        int rw;
        struct btrfs_fs_info *info;
        struct btrfs_work work;
};

/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline int schedule_bio(struct btrfs_root *root,
                                 struct btrfs_device *device,
                                 int rw, struct bio *bio)
{
        int should_queue = 1;

        /* don't bother with additional async steps for reads, right now */
        if (!(rw & (1 << BIO_RW))) {
                bio_get(bio);
                submit_bio(rw, bio);
                bio_put(bio);
                return 0;
        }

        /*
         * nr_async_bios allows us to reliably return congestion to the
         * higher layers.  Otherwise, the async bio makes it appear we have
         * made progress against dirty pages when we've really just put it
         * on a queue for later
         */
        atomic_inc(&root->fs_info->nr_async_bios);
        WARN_ON(bio->bi_next);
        bio->bi_next = NULL;
        bio->bi_rw |= rw;

        spin_lock(&device->io_lock);

        if (device->pending_bio_tail)
                device->pending_bio_tail->bi_next = bio;

        device->pending_bio_tail = bio;
        if (!device->pending_bios)
                device->pending_bios = bio;
        if (device->running_pending)
                should_queue = 0;

        spin_unlock(&device->io_lock);

        if (should_queue)
                btrfs_queue_worker(&root->fs_info->submit_workers,
                                   &device->work);
        return 0;
}
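
/*
 * btrfs_map_bio() fans one logical bio out to the devices backing it:
 * btrfs_map_block() supplies a btrfs_bio_stripe per copy, the original bio
 * is cloned for all but the last stripe, each bio gets its sector and bdev
 * rewritten to the physical location, and end_bio_multi_stripe() stitches
 * the completions back together.  Stripes on missing devices are failed
 * immediately with -EIO via the latest_bdev fallback below.
 */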
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
                  int mirror_num, int async_submit)
{
        struct btrfs_mapping_tree *map_tree;
        struct btrfs_device *dev;
        struct bio *first_bio = bio;
        u64 logical = (u64)bio->bi_sector << 9;
        u64 length = 0;
        u64 map_length;
        struct btrfs_multi_bio *multi = NULL;
        int ret;
        int dev_nr = 0;
        int total_devs = 1;

        length = bio->bi_size;
        map_tree = &root->fs_info->mapping_tree;
        map_length = length;

        ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
                              mirror_num);
        BUG_ON(ret);

        total_devs = multi->num_stripes;
        if (map_length < length) {
                printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
                       "len %llu\n", (unsigned long long)logical,
                       (unsigned long long)length,
                       (unsigned long long)map_length);
                BUG();
        }
        multi->end_io = first_bio->bi_end_io;
        multi->private = first_bio->bi_private;
        multi->orig_bio = first_bio;
        atomic_set(&multi->stripes_pending, multi->num_stripes);

        while (dev_nr < total_devs) {
                if (total_devs > 1) {
                        if (dev_nr < total_devs - 1) {
                                bio = bio_clone(first_bio, GFP_NOFS);
                                BUG_ON(!bio);
                        } else {
                                bio = first_bio;
                        }
                        bio->bi_private = multi;
                        bio->bi_end_io = end_bio_multi_stripe;
                }
                bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
                dev = multi->stripes[dev_nr].dev;
                BUG_ON(rw == WRITE && !dev->writeable);
                if (dev && dev->bdev) {
                        bio->bi_bdev = dev->bdev;
                        if (async_submit)
                                schedule_bio(root, dev, rw, bio);
                        else
                                submit_bio(rw, bio);
                } else {
                        bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
                        bio->bi_sector = logical >> 9;
                        bio_endio(bio, -EIO);
                }
                dev_nr++;
        }
        if (total_devs == 1)
                kfree(multi);
        return 0;
}
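
/*
 * btrfs_find_device() walks the fs_devices list of the mounted filesystem
 * and then any chained seed filesystems, looking for a device matching
 * @devid (and, when supplied, the device uuid and owning fsid).  NULL is
 * returned when no such device is known.
 */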
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
                                       u8 *uuid, u8 *fsid)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *cur_devices;

        cur_devices = root->fs_info->fs_devices;
        while (cur_devices) {
                if (!fsid ||
                    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
                        device = __find_device(&cur_devices->devices,
                                               devid, uuid);
                        if (device)
                                return device;
                }
                cur_devices = cur_devices->seed;
        }
        return NULL;
}
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
                                            u64 devid, u8 *dev_uuid)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

        device = kzalloc(sizeof(*device), GFP_NOFS);
        if (!device)
                return NULL;
        list_add(&device->dev_list,
                 &fs_devices->devices);
        device->barriers = 1;
        device->dev_root = root->fs_info->dev_root;
        device->devid = devid;
        device->work.func = pending_bios_fn;
        device->fs_devices = fs_devices;
        fs_devices->num_devices++;
        spin_lock_init(&device->io_lock);
        INIT_LIST_HEAD(&device->dev_alloc_list);
        memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
        return device;
}
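
/*
 * read_one_chunk() turns an on-disk chunk item into an in-memory extent_map
 * whose bdev pointer carries the map_lookup with one btrfs_bio_stripe per
 * copy.  Each stripe's device is resolved by devid and uuid; with -o
 * degraded a missing device gets a stub from add_missing_dev(), otherwise
 * the chunk is rejected with -EIO.
 */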
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                          struct extent_buffer *leaf,
                          struct btrfs_chunk *chunk)
{
        struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
        struct map_lookup *map;
        struct extent_map *em;
        u64 logical;
        u64 length;
        u64 devid;
        u8 uuid[BTRFS_UUID_SIZE];
        int num_stripes;
        int ret;
        int i;

        logical = key->offset;
        length = btrfs_chunk_length(leaf, chunk);

        spin_lock(&map_tree->map_tree.lock);
        em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
        spin_unlock(&map_tree->map_tree.lock);

        /* already mapped? */
        if (em && em->start <= logical && em->start + em->len > logical) {
                free_extent_map(em);
                return 0;
        } else if (em) {
                free_extent_map(em);
        }

        em = alloc_extent_map(GFP_NOFS);
        if (!em)
                return -ENOMEM;
        num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
        map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
        if (!map) {
                free_extent_map(em);
                return -ENOMEM;
        }

        em->bdev = (struct block_device *)map;
        em->start = logical;
        em->len = length;
        em->block_start = 0;
        em->block_len = em->len;

        map->num_stripes = num_stripes;
        map->io_width = btrfs_chunk_io_width(leaf, chunk);
        map->io_align = btrfs_chunk_io_align(leaf, chunk);
        map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
        map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
        map->type = btrfs_chunk_type(leaf, chunk);
        map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
        for (i = 0; i < num_stripes; i++) {
                map->stripes[i].physical =
                        btrfs_stripe_offset_nr(leaf, chunk, i);
                devid = btrfs_stripe_devid_nr(leaf, chunk, i);
                read_extent_buffer(leaf, uuid, (unsigned long)
                                   btrfs_stripe_dev_uuid_nr(chunk, i),
                                   BTRFS_UUID_SIZE);
                map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
                                                        NULL);
                if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
                        kfree(map);
                        free_extent_map(em);
                        return -EIO;
                }
                if (!map->stripes[i].dev) {
                        map->stripes[i].dev =
                                add_missing_dev(root, devid, uuid);
                        if (!map->stripes[i].dev) {
                                kfree(map);
                                free_extent_map(em);
                                return -EIO;
                        }
                }
                map->stripes[i].dev->in_fs_metadata = 1;
        }

        spin_lock(&map_tree->map_tree.lock);
        ret = add_extent_mapping(&map_tree->map_tree, em);
        spin_unlock(&map_tree->map_tree.lock);
        BUG_ON(ret);
        free_extent_map(em);

        return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
                                 struct btrfs_dev_item *dev_item,
                                 struct btrfs_device *device)
{
        unsigned long ptr;

        device->devid = btrfs_device_id(leaf, dev_item);
        device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
        device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
        device->type = btrfs_device_type(leaf, dev_item);
        device->io_align = btrfs_device_io_align(leaf, dev_item);
        device->io_width = btrfs_device_io_width(leaf, dev_item);
        device->sector_size = btrfs_device_sector_size(leaf, dev_item);

        ptr = (unsigned long)btrfs_device_uuid(dev_item);
        read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

        return 0;
}
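
/*
 * open_seed_devices() handles a device item whose fsid is not the one being
 * mounted, i.e. it belongs to a seed filesystem.  The matching fs_devices
 * is cloned, its devices are opened read-only, and the clone is chained
 * onto fs_info->fs_devices->seed so later device lookups can follow the
 * chain.
 */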
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
        struct btrfs_fs_devices *fs_devices;
        int ret;

        mutex_lock(&uuid_mutex);

        fs_devices = root->fs_info->fs_devices->seed;
        while (fs_devices) {
                if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
                        ret = 0;
                        goto out;
                }
                fs_devices = fs_devices->seed;
        }

        fs_devices = find_fsid(fsid);
        if (!fs_devices) {
                ret = -ENOENT;
                goto out;
        }

        fs_devices = clone_fs_devices(fs_devices);
        if (IS_ERR(fs_devices)) {
                ret = PTR_ERR(fs_devices);
                goto out;
        }

        ret = __btrfs_open_devices(fs_devices, FMODE_READ,
                                   root->fs_info->bdev_holder);
        if (ret)
                goto out;

        if (!fs_devices->seeding) {
                __btrfs_close_devices(fs_devices);
                free_fs_devices(fs_devices);
                ret = -EINVAL;
                goto out;
        }

        fs_devices->seed = root->fs_info->fs_devices->seed;
        root->fs_info->fs_devices->seed = fs_devices;
out:
        mutex_unlock(&uuid_mutex);
        return ret;
}
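
/*
 * read_one_dev() matches a device item from the dev tree (or the super
 * block) against the devices scanned at mount time.  Foreign fsids are
 * routed through open_seed_devices(); devices that cannot be found are
 * either stubbed out (with -o degraded) or fail the mount, and devices that
 * still belong to a seed filesystem are sanity checked before their fields
 * are filled in from the item.
 */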
static int read_one_dev(struct btrfs_root *root,
                        struct extent_buffer *leaf,
                        struct btrfs_dev_item *dev_item)
{
        struct btrfs_device *device;
        u64 devid;
        int ret;
        u8 fs_uuid[BTRFS_UUID_SIZE];
        u8 dev_uuid[BTRFS_UUID_SIZE];

        devid = btrfs_device_id(leaf, dev_item);
        read_extent_buffer(leaf, dev_uuid,
                           (unsigned long)btrfs_device_uuid(dev_item),
                           BTRFS_UUID_SIZE);
        read_extent_buffer(leaf, fs_uuid,
                           (unsigned long)btrfs_device_fsid(dev_item),
                           BTRFS_UUID_SIZE);

        if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
                ret = open_seed_devices(root, fs_uuid);
                if (ret && !btrfs_test_opt(root, DEGRADED))
                        return ret;
        }

        device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
        if (!device || !device->bdev) {
                if (!btrfs_test_opt(root, DEGRADED))
                        return -EIO;

                if (!device) {
                        printk(KERN_WARNING "warning devid %llu missing\n",
                               (unsigned long long)devid);
                        device = add_missing_dev(root, devid, dev_uuid);
                        if (!device)
                                return -ENOMEM;
                }
        }

        if (device->fs_devices != root->fs_info->fs_devices) {
                BUG_ON(device->writeable);
                if (device->generation !=
                    btrfs_device_generation(leaf, dev_item))
                        return -EINVAL;
        }

        fill_device_from_item(leaf, dev_item, device);
        device->dev_root = root->fs_info->dev_root;
        device->in_fs_metadata = 1;
        if (device->writeable)
                device->fs_devices->total_rw_bytes += device->total_bytes;
        ret = 0;
        return ret;
}
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
        struct btrfs_dev_item *dev_item;

        dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
                                                     dev_item);
        return read_one_dev(root, buf, dev_item);
}
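
/*
 * btrfs_read_sys_array() parses the sys_chunk_array embedded in the super
 * block: a packed sequence of (btrfs_disk_key, btrfs_chunk) pairs.  Only
 * BTRFS_CHUNK_ITEM_KEY entries are expected, and because a chunk item's
 * size depends on its stripe count the cursor advances by sizeof(disk_key)
 * plus btrfs_chunk_item_size(num_stripes) per entry.  This bootstraps just
 * enough mapping information to be able to read the chunk tree itself.
 */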
int btrfs_read_sys_array(struct btrfs_root *root)
{
        struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
        struct extent_buffer *sb;
        struct btrfs_disk_key *disk_key;
        struct btrfs_chunk *chunk;
        u8 *ptr;
        unsigned long sb_ptr;
        int ret = 0;
        u32 num_stripes;
        u32 array_size;
        u32 len = 0;
        u32 cur;
        struct btrfs_key key;

        sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
                                          BTRFS_SUPER_INFO_SIZE);
        if (!sb)
                return -ENOMEM;
        btrfs_set_buffer_uptodate(sb);
        btrfs_set_buffer_lockdep_class(sb, 0);

        write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
        array_size = btrfs_super_sys_array_size(super_copy);

        ptr = super_copy->sys_chunk_array;
        sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
        cur = 0;

        while (cur < array_size) {
                disk_key = (struct btrfs_disk_key *)ptr;
                btrfs_disk_key_to_cpu(&key, disk_key);

                len = sizeof(*disk_key); ptr += len;
                sb_ptr += len;
                cur += len;

                if (key.type == BTRFS_CHUNK_ITEM_KEY) {
                        chunk = (struct btrfs_chunk *)sb_ptr;
                        ret = read_one_chunk(root, &key, sb, chunk);
                        if (ret)
                                break;
                        num_stripes = btrfs_chunk_num_stripes(sb, chunk);
                        len = btrfs_chunk_item_size(num_stripes);
                } else {
                        ret = -EIO;
                        break;
                }
                ptr += len;
                sb_ptr += len;
                cur += len;
        }
        free_extent_buffer(sb);
        return ret;
}
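
/*
 * btrfs_read_chunk_tree() walks the chunk root once the sys array has been
 * loaded.  Device items are read in a first pass so that every chunk item
 * processed in the second pass can resolve its stripes to known devices;
 * that is why the search restarts (goto again) after the device pass.
 */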
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        int ret;
        int slot;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /* first we search for all of the device items, and then we
         * read in all of the chunk items.  This way we can create chunk
         * mappings that reference all of the devices that are found
         */
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.offset = 0;
        key.type = 0;
again:
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        while (1) {
                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
                        break;
                }
                btrfs_item_key_to_cpu(leaf, &found_key, slot);
                if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
                        if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
                                break;
                        if (found_key.type == BTRFS_DEV_ITEM_KEY) {
                                struct btrfs_dev_item *dev_item;
                                dev_item = btrfs_item_ptr(leaf, slot,
                                                  struct btrfs_dev_item);
                                ret = read_one_dev(root, leaf, dev_item);
                                if (ret)
                                        goto error;
                        }
                } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
                        struct btrfs_chunk *chunk;
                        chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
                        ret = read_one_chunk(root, &found_key, leaf, chunk);
                        if (ret)
                                goto error;
                }
                path->slots[0]++;
        }
        if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
                key.objectid = 0;
                btrfs_release_path(root, path);
                goto again;
        }
        ret = 0;
error:
        btrfs_free_path(path);