/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
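
/*
 * uuid_mutex protects the fs_uuids list of all scanned btrfs_fs_devices;
 * the exported helpers below let other files serialize against it.
 */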
void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}
static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}
int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}
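
/*
 * find a device by devid (and optionally uuid) in the given device list;
 * a NULL uuid matches any device with that devid
 */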
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
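
/* splice a chain of bios back onto the front of the pending list */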
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device. This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block. The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested. This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long num_sync_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;
	/* we want to make sure that every time we switch from the sync
	 * list to the normal list, we unplug
	 */
	num_sync_run = 0;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;
	/* take all the bios off the list at once and process them
	 * later on (without the lock held). But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}
	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);
	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop. Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;
	spin_unlock(&device->io_lock);

	/*
	 * if we're doing the regular priority list, make sure we unplug
	 * for any high prio bios we've sent down
	 */
	if (pending_bios == &device->pending_bios && num_sync_run > 0) {
		num_sync_run = 0;
		blk_run_backing_dev(bdi, NULL);
	}
	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}
		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
		if (cur->bi_rw & REQ_SYNC)
			num_sync_run++;

		submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched()) {
			if (num_sync_run) {
				blk_run_backing_dev(bdi, NULL);
				num_sync_run = 0;
			}
			cond_resched();
		}
		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested. Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;
			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop. So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched()) {
					if (num_sync_run) {
						blk_run_backing_dev(bdi, NULL);
						num_sync_run = 0;
					}
					cond_resched();
				}
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
	}

	if (num_sync_run) {
		num_sync_run = 0;
		blk_run_backing_dev(bdi, NULL);
	}
	/*
	 * IO has already been through a long path to get here. Checksumming,
	 * async helper threads, perhaps compression. We've done a pretty
	 * good job of collecting a batch of IO and should just unplug
	 * the device right away.
	 *
	 * This will help anyone who is waiting on the IO, they might have
	 * already unplugged, but managed to do so before the bio they
	 * cared about found its way down here.
	 */
	blk_run_backing_dev(bdi, NULL);
	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	return 0;
}
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
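
/*
 * called for every device noticed during a scan: add it to the matching
 * fs_devices entry (creating one for a new fsid), or refresh the path of
 * a device we have already seen
 */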
static noinline int device_list_add(const char *path,
				    struct btrfs_super_block *disk_super,
				    u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);
	char *name;

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name, path)) {
		name = kstrdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
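
/* deep-copy an fs_devices list; used when sprouting from a seed filesystem */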
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
	mutex_lock(&orig->device_list_mutex);
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
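
/* drop scanned devices that didn't end up referenced by the fs metadata */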
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	mutex_lock(&uuid_mutex);
again:
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	mutex_unlock(&uuid_mutex);
	return 0;
}
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		device->bdev = NULL;
		device->writeable = 0;
		device->in_fs_metadata = 0;
	}
	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;
	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = blkdev_get_by_path(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}
		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
error_close:
		blkdev_put(bdev, flags);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EINVAL;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk(KERN_INFO "device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * find_free_dev_extent - find free space in the specified device
 * @trans:	transaction handler
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the max
 *		free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = 1024 * 1024;

	if (root->fs_info->alloc_start + num_bytes <= search_end)
		search_start = max(root->fs_info->alloc_start, search_start);

	max_hole_start = search_start;
	max_hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	hole_size = search_end - search_start;
	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
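
/*
 * find the dev extent item covering @start on @device and delete it,
 * crediting its length back to device->bytes_used
 */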
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		BUG_ON(ret);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	return ret;
}
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
	return ret;
}
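
/* find the offset just past the last chunk item for @objectid in the chunk tree */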
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
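
/* pick the next unused devid by looking at the last dev item in the chunk tree */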
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}
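
/*
 * remove a device from the filesystem: migrate its data away via
 * btrfs_shrink_device, delete its items from the chunk tree, and wipe
 * the super block so the device isn't detected as part of the FS anymore
 */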
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	mutex_lock(&root->fs_info->volume_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		    root->fs_info->avail_system_alloc_bits |
		    root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		list_del_init(&device->dev_alloc_list);
		root->fs_info->fs_devices->rw_devices--;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	device->in_fs_metadata = 0;

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_init(&device->dev_list);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device->fs_devices->num_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev) {
		blkdev_put(device->bdev, device->mode);
		device->bdev = NULL;
		device->fs_devices->open_devices--;
	}

	num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);

	if (device->fs_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == device->fs_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = device->fs_devices->seed;
		device->fs_devices->seed = NULL;
		__btrfs_close_devices(device->fs_devices);
		free_fs_devices(device->fs_devices);
	}

	/*
	 * at this point, the device is zero sized. We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (device->writeable) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	kfree(device->name);
	kfree(device);
	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}
/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);
	list_splice_init(&fs_devices->devices, &seed_devices->devices);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(root, path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device);

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	mutex_lock(&root->fs_info->volume_mutex);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		kfree(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		kfree(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(trans, root);
		BUG_ON(ret);
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		BUG_ON(ret);
		ret = btrfs_finish_sprout(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_device(trans, root, device);
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		ret = btrfs_relocate_sys_chunks(root);
		BUG_ON(ret);
	}
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	return ret;
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	goto out;
}
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}
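
/* delete the chunk item for @chunk_offset from the chunk tree */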
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}
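
/* remove a chunk's entry from the sys_chunk_array in the super block copy */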
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
			       u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
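
/*
 * relocate everything in one chunk, then delete its device extents,
 * chunk item, and block group
 */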
static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(IS_ERR(trans));

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0);

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(chunk_root, path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
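
/*
 * balance: shrink and re-grow every device a little to make room, then
 * relocate every chunk from highest offset down so the allocator can lay
 * the data out again across all devices
 */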
int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one make some room on all the devices */
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;

		/* chunk zero is special */
		if (found_key.offset == 0)
			break;

		btrfs_release_path(chunk_root, path);
		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		BUG_ON(ret && ret != -ENOSPC);
		key.offset = found_key.offset - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->volume_mutex);
	return ret;
}
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = device->total_bytes;
	u64 diff = device->total_bytes - new_size;

	if (new_size >= device->total_bytes)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;

	lock_chunks(root);

	device->total_bytes = new_size;
	if (device->writeable)
		device->fs_devices->total_rw_bytes -= diff;
	unlock_chunks(root);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(root, path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			btrfs_release_path(root, path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			btrfs_release_path(root, path);
			break;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
		key.offset -= 1;
	}

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		lock_chunks(root);

		device->total_bytes = old_size;
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
		unlock_chunks(root);
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	lock_chunks(root);

	device->disk_total_bytes = new_size;
	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret) {
		unlock_chunks(root);
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
done:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
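
/* how many bytes of usable space a chunk of @num_stripes stripes provides */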
static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
					int num_stripes, int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}
/* Used to sort the devices by max_avail (descending sort) */
int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2)
{
	if (((struct btrfs_device_info *)dev_info1)->max_avail >
	    ((struct btrfs_device_info *)dev_info2)->max_avail)
		return -1;
	else if (((struct btrfs_device_info *)dev_info1)->max_avail <
		 ((struct btrfs_device_info *)dev_info2)->max_avail)
		return 1;
	else
		return 0;
}
static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type,
				 int *num_stripes, int *min_stripes,
				 int *sub_stripes)
{
	*num_stripes = 1;
	*min_stripes = 1;
	*sub_stripes = 0;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		*num_stripes = fs_devices->rw_devices;
		*min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		*num_stripes = 2;
		*min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		if (fs_devices->rw_devices < 2)
			return -ENOSPC;
		*num_stripes = 2;
		*min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		*num_stripes = fs_devices->rw_devices;
		if (*num_stripes < 4)
			return -ENOSPC;
		*num_stripes &= ~(u32)1;
		*sub_stripes = 2;
		*min_stripes = 4;
	}

	return 0;
}
static u64 __btrfs_calc_stripe_size(struct btrfs_fs_devices *fs_devices,
				    u64 proposed_size, u64 type,
				    int num_stripes, int small_stripe)
{
	int min_stripe_size = 1 * 1024 * 1024;
	u64 calc_size = proposed_size;
	u64 max_chunk_size = calc_size;
	int ncopies = 1;

	if (type & (BTRFS_BLOCK_GROUP_RAID1 |
		    BTRFS_BLOCK_GROUP_DUP |
		    BTRFS_BLOCK_GROUP_RAID10))
		ncopies = 2;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 256 * 1024 * 1024;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	if (calc_size * num_stripes > max_chunk_size * ncopies) {
		calc_size = max_chunk_size * ncopies;
		do_div(calc_size, num_stripes);
		do_div(calc_size, BTRFS_STRIPE_LEN);
		calc_size *= BTRFS_STRIPE_LEN;
	}

	/* we don't want tiny stripes */
	if (!small_stripe)
		calc_size = max_t(u64, min_stripe_size, calc_size);

	/*
	 * we're about to do_div by the BTRFS_STRIPE_LEN so lets make sure
	 * we end up with something bigger than a stripe
	 */
	calc_size = max_t(u64, calc_size, BTRFS_STRIPE_LEN);

	do_div(calc_size, BTRFS_STRIPE_LEN);
	calc_size *= BTRFS_STRIPE_LEN;

	return calc_size;
}
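
/*
 * shrink a map_lookup to @num_stripes entries; falls back to editing the
 * stripe count in place if the smaller allocation fails
 */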
static struct map_lookup *__shrink_map_lookup_stripes(struct map_lookup *map,
						      int num_stripes)
{
	struct map_lookup *new;
	size_t len = map_lookup_size(num_stripes);

	BUG_ON(map->num_stripes < num_stripes);

	if (map->num_stripes == num_stripes)
		return map;

	new = kmalloc(len, GFP_NOFS);
	if (!new) {
		/* just change map->num_stripes */
		map->num_stripes = num_stripes;
		return map;
	}

	memcpy(new, map, len);
	new->num_stripes = num_stripes;
	kfree(map);
	return new;
}
/*
 * helper to allocate device space from btrfs_device_info, in which we store
 * the max free space of every device. It is used when we cannot allocate
 * chunks by the default size.
 *
 * With this helper, we can allocate a new chunk that is as large as possible.
 */
static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_devices *fs_devices,
				    struct btrfs_device_info *devices,
				    int nr_device, u64 type,
				    struct map_lookup **map_lookup,
				    int min_stripes, u64 *stripe_size)
{
	int i, index, sort_again = 0;
	int min_devices = min_stripes;
	u64 max_avail, min_free;
	struct map_lookup *map = *map_lookup;
	int ret;

	if (nr_device < min_stripes)
		return -ENOSPC;

	btrfs_descending_sort_devices(devices, nr_device);

	max_avail = devices[0].max_avail;
	if (!max_avail)
		return -ENOSPC;

	for (i = 0; i < nr_device; i++) {
		/*
		 * if dev_offset = 0, it means the free space of this device
		 * is less than what we need, and we didn't search max avail
		 * extent on this device, so do it now.
		 */
		if (!devices[i].dev_offset) {
			ret = find_free_dev_extent(trans, devices[i].dev,
						   max_avail,
						   &devices[i].dev_offset,
						   &devices[i].max_avail);
			if (ret != 0 && ret != -ENOSPC)
				return ret;
			sort_again = 1;
		}
	}

	/* we updated the max avail free extent of each device, sort again */
	if (sort_again)
		btrfs_descending_sort_devices(devices, nr_device);

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_devices = 1;

	if (!devices[min_devices - 1].max_avail)
		return -ENOSPC;

	max_avail = devices[min_devices - 1].max_avail;
	if (type & BTRFS_BLOCK_GROUP_DUP)
		do_div(max_avail, 2);

	max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type,
					     min_stripes, 1);
	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = max_avail * 2;
	else
		min_free = max_avail;

	if (min_free > devices[min_devices - 1].max_avail)
		return -ENOSPC;

	map = __shrink_map_lookup_stripes(map, min_stripes);
	*stripe_size = max_avail;

	index = 0;
	for (i = 0; i < min_stripes; i++) {
		map->stripes[i].dev = devices[index].dev;
		map->stripes[i].physical = devices[index].dev_offset;
		if (type & BTRFS_BLOCK_GROUP_DUP) {
			i++;
			map->stripes[i].dev = devices[index].dev;
			map->stripes[i].physical = devices[index].dev_offset +
						   max_avail;
		}
		index++;
	}
	*map_lookup = map;

	return 0;
}
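/*
 * Example of the DUP branch above: with a single qualifying device
 * holding 1G of free space, max_avail is halved before being passed to
 * __btrfs_calc_stripe_size(), and both stripes of each pair land on
 * that one device, at dev_offset and at dev_offset + max_avail, which
 * is why min_devices drops to 1 for DUP.
 */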
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct map_lookup **map_ret,
			       u64 *num_bytes, u64 *stripe_size,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_device *device = NULL;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info;
	struct list_head private_devs;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 min_free;
	u64 avail;
	u64 dev_offset;
	int num_stripes;
	int min_stripes;
	int sub_stripes;
	int min_devices;	/* the min number of devices we need */
	int i;
	int ret;
	int index;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		WARN_ON(1);
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}
	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;

	ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes,
				    &min_stripes, &sub_stripes);
	if (ret)
		return ret;

	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	cur = fs_devices->alloc_list.next;
	index = 0;
	i = 0;

	calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type,
					     num_stripes, 0);

	if (type & BTRFS_BLOCK_GROUP_DUP) {
		min_free = calc_size * 2;
		min_devices = 1;
	} else {
		min_free = calc_size;
		min_devices = min_stripes;
	}

	INIT_LIST_HEAD(&private_devs);
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
		BUG_ON(!device->writeable);
		if (device->total_bytes > device->bytes_used)
			avail = device->total_bytes - device->bytes_used;
		else
			avail = 0;
		cur = cur->next;

		if (device->in_fs_metadata && avail >= min_free) {
			ret = find_free_dev_extent(trans, device, min_free,
						   &devices_info[i].dev_offset,
						   &devices_info[i].max_avail);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				map->stripes[index].dev = device;
				map->stripes[index].physical =
						devices_info[i].dev_offset;
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP) {
					map->stripes[index].dev = device;
					map->stripes[index].physical =
						devices_info[i].dev_offset +
						calc_size;
					index++;
				}
			} else if (ret != -ENOSPC)
				goto error;

			devices_info[i].dev = device;
			i++;
		} else if (device->in_fs_metadata &&
			   avail >= BTRFS_STRIPE_LEN) {
			devices_info[i].dev = device;
			devices_info[i].max_avail = avail;
			i++;
		}

		if (cur == &fs_devices->alloc_list)
			break;
	}

	list_splice(&private_devs, &fs_devices->alloc_list);
	if (index < num_stripes) {
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}

			map = __shrink_map_lookup_stripes(map, num_stripes);
		} else if (i >= min_devices) {
			ret = __btrfs_alloc_tiny_space(trans, fs_devices,
						       devices_info, i, type,
						       &map, min_stripes,
						       &calc_size);
			if (ret)
				goto error;
		} else {
			ret = -ENOSPC;
			goto error;
		}
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
	map->type = type;
	map->sub_stripes = sub_stripes;

	*map_ret = map;
	*stripe_size = calc_size;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 map->num_stripes, sub_stripes);

	trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes);

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		ret = -ENOMEM;
		goto error;
	}
	em->bdev = (struct block_device *)map;
	em->start = start;
	em->len = *num_bytes;
	em->block_start = 0;
	em->block_len = em->len;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, *num_bytes);
	BUG_ON(ret);

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		ret = btrfs_alloc_dev_extent(trans, device,
				info->chunk_root->root_key.objectid,
				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				start, dev_offset, calc_size);
		BUG_ON(ret);
		index++;
	}

	kfree(devices_info);
	return 0;

error:
	kfree(map);
	kfree(devices_info);
	return ret;
}
static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct map_lookup *map, u64 chunk_offset,
				u64 chunk_size, u64 stripe_size)
{
	u64 dev_offset;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
	int index = 0;
	int ret;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		device->bytes_used += stripe_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);
		index++;
	}

	index = 0;
	stripe = &chunk->stripe;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
		index++;
	}

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
					     item_size);
		BUG_ON(ret);
	}

	kfree(chunk);
	return 0;
}
/*
 * Chunk allocation falls into two parts. The first part does the work
 * that makes the newly allocated chunk usable, but does not do any
 * operation that modifies the chunk tree. The second part does the work
 * that requires modifying the chunk tree. This division is important for
 * the bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;
	u64 chunk_size;
	u64 stripe_size;
	struct map_lookup *map;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	int ret;

	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &chunk_offset);
	if (ret)
		return ret;

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, type);
	if (ret)
		return ret;

	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);
	return 0;
}
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 chunk_size;
	u64 sys_chunk_size;
	u64 stripe_size;
	u64 sys_stripe_size;
	u64 alloc_profile;
	struct map_lookup *map;
	struct map_lookup *sys_map;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	ret = find_next_chunk(fs_info->chunk_root,
			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
	BUG_ON(ret);

	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
			(fs_info->metadata_alloc_profile &
			 fs_info->avail_metadata_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, alloc_profile);
	BUG_ON(ret);

	sys_chunk_offset = chunk_offset + chunk_size;

	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
			(fs_info->system_alloc_profile &
			 fs_info->avail_system_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
				  &sys_chunk_size, &sys_stripe_size,
				  sys_chunk_offset, alloc_profile);
	BUG_ON(ret);

	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
	BUG_ON(ret);

	/*
	 * Modifying the chunk tree needs to allocate new blocks from both
	 * the system block group and the metadata block group. So we can
	 * only do operations that modify the chunk tree after both block
	 * groups have been created.
	 */
	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);

	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
				   sys_chunk_offset, sys_chunk_size,
				   sys_stripe_size);
	BUG_ON(ret);
	return 0;
}
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	if (btrfs_test_opt(root, DEGRADED)) {
		free_extent_map(em);
		return 0;
	}

	map = (struct map_lookup *)em->bdev;
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}
	free_extent_map(em);
	return readonly;
}
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}
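/*
 * For example, a two-device RAID1 chunk (or a DUP chunk, whose two
 * stripes share one device) reports 2 copies via num_stripes, a
 * four-device RAID10 chunk reports 2 via sub_stripes (one copy per
 * mirror pair), and everything else falls through to a single copy.
 */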
static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;
	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}

	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_multi_bio **multi_ret,
			     int mirror_num, struct page *unplug_page)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_end_offset;
	u64 stripe_nr;
	u64 stripe_nr_orig;
	u64 stripe_nr_end;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	int num_stripes;
	int max_errors = 0;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
		stripes_allocated = 1;
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;

		atomic_set(&multi->error, 0);
	}

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	read_unlock(&em_tree->lock);

	if (!em && unplug_page) {
		kfree(multi);
		return 0;
	}

	if (!em) {
		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
		       (unsigned long long)logical,
		       (unsigned long long)*length);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & REQ_WRITE) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
			max_errors = 1;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
			max_errors = 1;
		}
	}
	if (rw & REQ_DISCARD) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			stripes_required = map->num_stripes;
		}
	}
	if (multi_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	if (rw & REQ_DISCARD)
		*length = min_t(u64, em->len - offset, *length);
	else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			      BTRFS_BLOCK_GROUP_RAID1 |
			      BTRFS_BLOCK_GROUP_RAID10 |
			      BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!multi_ret && !unplug_page)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	stripe_nr_orig = stripe_nr;
	stripe_nr_end = (offset + *length + map->stripe_len - 1) &
			(~(map->stripe_len - 1));
	do_div(stripe_nr_end, map->stripe_len);
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + *length);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->num_stripes,
					    stripe_nr_end - stripe_nr_orig);
		stripe_index = do_div(stripe_nr, map->num_stripes);
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (unplug_page || (rw & (REQ_WRITE | REQ_DISCARD)))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, 0,
					    map->num_stripes,
					    current->pid % map->num_stripes);
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (REQ_WRITE | REQ_DISCARD))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (unplug_page || (rw & REQ_WRITE))
			num_stripes = map->sub_stripes;
		else if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->sub_stripes *
					    (stripe_nr_end - stripe_nr_orig),
					    map->num_stripes);
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes);
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);

	if (rw & REQ_DISCARD) {
		for (i = 0; i < num_stripes; i++) {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;

			if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
				u64 stripes;
				u32 last_stripe = 0;
				int j;

				div_u64_rem(stripe_nr_end - 1,
					    map->num_stripes,
					    &last_stripe);

				for (j = 0; j < map->num_stripes; j++) {
					u32 test;

					div_u64_rem(stripe_nr_end - 1 - j,
						    map->num_stripes, &test);
					if (test == stripe_index)
						break;
				}
				stripes = stripe_nr_end - 1 - j;
				do_div(stripes, map->num_stripes);
				multi->stripes[i].length = map->stripe_len *
					(stripes - stripe_nr + 1);

				if (i == 0) {
					multi->stripes[i].length -=
						stripe_offset;
					stripe_offset = 0;
				}
				if (stripe_index == last_stripe)
					multi->stripes[i].length -=
						stripe_end_offset;
			} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
				u64 stripes;
				int j;
				int factor = map->num_stripes /
					     map->sub_stripes;
				u32 last_stripe = 0;

				div_u64_rem(stripe_nr_end - 1,
					    factor, &last_stripe);
				last_stripe *= map->sub_stripes;

				for (j = 0; j < factor; j++) {
					u32 test;

					div_u64_rem(stripe_nr_end - 1 - j,
						    factor, &test);

					if (test ==
					    stripe_index / map->sub_stripes)
						break;
				}
				stripes = stripe_nr_end - 1 - j;
				do_div(stripes, factor);
				multi->stripes[i].length = map->stripe_len *
					(stripes - stripe_nr + 1);

				if (i < map->sub_stripes) {
					multi->stripes[i].length -=
						stripe_offset;
					if (i == map->sub_stripes - 1)
						stripe_offset = 0;
				}
				if (stripe_index >= last_stripe &&
				    stripe_index <= (last_stripe +
						     map->sub_stripes - 1)) {
					multi->stripes[i].length -=
						stripe_end_offset;
				}
			} else
				multi->stripes[i].length = *length;

			stripe_index++;
			if (stripe_index == map->num_stripes) {
				/* This could only happen for RAID0/10 */
				stripe_index = 0;
				stripe_nr++;
			}
		}
	} else {
		for (i = 0; i < num_stripes; i++) {
			if (unplug_page) {
				struct btrfs_device *device;
				struct backing_dev_info *bdi;

				device = map->stripes[stripe_index].dev;
				if (device->bdev) {
					bdi = blk_get_backing_dev_info(device->
								       bdev);
					if (bdi->unplug_io_fn)
						bdi->unplug_io_fn(bdi,
								  unplug_page);
				}
			} else {
				multi->stripes[i].physical =
					map->stripes[stripe_index].physical +
					stripe_offset +
					stripe_nr * map->stripe_len;
				multi->stripes[i].dev =
					map->stripes[stripe_index].dev;
			}
			stripe_index++;
		}
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
		multi->max_errors = max_errors;
	}
out:
	free_extent_map(em);
	return 0;
}
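/*
 * Worked example of the stripe math above, assuming the usual 64K
 * BTRFS_STRIPE_LEN and a two-device RAID0 chunk: for a read at
 * offset = 200K into the chunk, stripe_nr = 200K / 64K = 3 and
 * stripe_offset = 200K - 3 * 64K = 8K; do_div(stripe_nr, 2) then
 * leaves stripe_nr = 1 with remainder stripe_index = 1, so the bio
 * targets stripes[1].physical + 64K + 8K, and *length is capped at
 * 64K - 8K = 56K so the bio never crosses a stripe boundary.
 */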
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}
*map_tree
,
3249 u64 chunk_start
, u64 physical
, u64 devid
,
3250 u64
**logical
, int *naddrs
, int *stripe_len
)
3252 struct extent_map_tree
*em_tree
= &map_tree
->map_tree
;
3253 struct extent_map
*em
;
3254 struct map_lookup
*map
;
3261 read_lock(&em_tree
->lock
);
3262 em
= lookup_extent_mapping(em_tree
, chunk_start
, 1);
3263 read_unlock(&em_tree
->lock
);
3265 BUG_ON(!em
|| em
->start
!= chunk_start
);
3266 map
= (struct map_lookup
*)em
->bdev
;
3269 if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
)
3270 do_div(length
, map
->num_stripes
/ map
->sub_stripes
);
3271 else if (map
->type
& BTRFS_BLOCK_GROUP_RAID0
)
3272 do_div(length
, map
->num_stripes
);
3274 buf
= kzalloc(sizeof(u64
) * map
->num_stripes
, GFP_NOFS
);
3277 for (i
= 0; i
< map
->num_stripes
; i
++) {
3278 if (devid
&& map
->stripes
[i
].dev
->devid
!= devid
)
3280 if (map
->stripes
[i
].physical
> physical
||
3281 map
->stripes
[i
].physical
+ length
<= physical
)
3284 stripe_nr
= physical
- map
->stripes
[i
].physical
;
3285 do_div(stripe_nr
, map
->stripe_len
);
3287 if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
) {
3288 stripe_nr
= stripe_nr
* map
->num_stripes
+ i
;
3289 do_div(stripe_nr
, map
->sub_stripes
);
3290 } else if (map
->type
& BTRFS_BLOCK_GROUP_RAID0
) {
3291 stripe_nr
= stripe_nr
* map
->num_stripes
+ i
;
3293 bytenr
= chunk_start
+ stripe_nr
* map
->stripe_len
;
3294 WARN_ON(nr
>= map
->num_stripes
);
3295 for (j
= 0; j
< nr
; j
++) {
3296 if (buf
[j
] == bytenr
)
3300 WARN_ON(nr
>= map
->num_stripes
);
3307 *stripe_len
= map
->stripe_len
;
3309 free_extent_map(em
);
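/*
 * Example of the reverse mapping above, again with a two-device RAID0
 * chunk and 64K stripes: a physical address 72K past stripes[1].physical
 * gives stripe_nr = 1, then stripe_nr * num_stripes + i = 3, so the
 * block lives at chunk_start + 3 * 64K; the buf[] scan only exists to
 * drop the duplicate logical addresses that mirrored profiles produce.
 */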
int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
		      u64 logical, struct page *page)
{
	u64 length = PAGE_CACHE_SIZE;
	return __btrfs_map_block(map_tree, READ, logical, &length,
				 NULL, 0, page);
}
static void end_bio_multi_stripe(struct bio *bio, int err)
{
	struct btrfs_multi_bio *multi = bio->bi_private;
	int is_orig_bio = 0;

	if (err)
		atomic_inc(&multi->error);

	if (bio == multi->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = multi->orig_bio;
		}
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&multi->error) > multi->max_errors) {
			err = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(multi);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
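/*
 * The max_errors tolerance above is what lets a RAID1 write succeed
 * when one of its two cloned bios fails: __btrfs_map_block() sets
 * max_errors to 1 for RAID1/RAID10/DUP writes, so a single error still
 * reports the original bio as up to date, while two surface -EIO.
 */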
struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};

/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline int schedule_bio(struct btrfs_root *root,
				 struct btrfs_device *device,
				 int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		submit_bio(rw, bio);
		bio_put(bio);
		return 0;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;
}
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
		       "len %llu\n", (unsigned long long)logical,
		       (unsigned long long)length,
		       (unsigned long long)map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	multi->orig_bio = first_bio;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
			bio_endio(bio, -EIO);
		}
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}
*btrfs_find_device(struct btrfs_root
*root
, u64 devid
,
3492 struct btrfs_device
*device
;
3493 struct btrfs_fs_devices
*cur_devices
;
3495 cur_devices
= root
->fs_info
->fs_devices
;
3496 while (cur_devices
) {
3498 !memcmp(cur_devices
->fsid
, fsid
, BTRFS_UUID_SIZE
)) {
3499 device
= __find_device(&cur_devices
->devices
,
3504 cur_devices
= cur_devices
->seed
;
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	device->fs_devices = fs_devices;
	device->missing = 1;
	fs_devices->num_devices++;
	fs_devices->missing_devices++;
	spin_lock_init(&device->io_lock);
	INIT_LIST_HEAD(&device->dev_alloc_list);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);

	return device;
}
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	mutex_lock(&uuid_mutex);

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices)) {
		ret = PTR_ERR(fs_devices);
		goto out;
	}

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret)
		goto out;

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device) {
			printk(KERN_WARNING "warning devid %llu missing\n",
			       (unsigned long long)devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
		} else if (!device->missing) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			root->fs_info->fs_devices->missing_devices++;
			device->missing = 1;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;
	if (device->writeable)
		device->fs_devices->total_rw_bytes += device->total_bytes;
	ret = 0;
	return ret;
}
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(sb, 0);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key); ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}
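/*
 * Layout parsed above: sys_chunk_array is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk) pairs, so each iteration
 * advances by sizeof(*disk_key) plus the variable chunk item size, and
 * anything other than a BTRFS_CHUNK_ITEM_KEY entry is treated as
 * corruption (-EIO).
 */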
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				if (ret)
					goto error;
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		btrfs_release_path(root, path);
		goto again;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}