/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <asm/div64.h>
#include "extent_map.h"
#include "transaction.h"
#include "print-tree.h"
#include "async-thread.h"
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
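/*
 * A quick worked example of map_lookup_size(): a two-stripe mapping
 * (e.g. RAID1) needs map_lookup_size(2), i.e.
 * sizeof(struct map_lookup) + 2 * sizeof(struct btrfs_bio_stripe).
 * The stripe array is the flexible tail of struct map_lookup, so the
 * whole mapping fits in one allocation.  (Illustrative only; exact
 * byte counts depend on the struct layouts in this tree.)
 */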
void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}
int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
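/*
 * A sketch of what requeue_list() does to the singly linked bio list
 * (head/tail pairs, chained through bi_next), assuming the list was
 * non-empty to begin with:
 *
 *   before:  pending_bios: H1 -> ... -> T1
 *            requeued run: head -> ... -> tail
 *   after:   pending_bios: head -> ... -> tail -> H1 -> ... -> T1
 *
 * i.e. bios we pulled off but could not finish are spliced back on the
 * front so submission order is preserved; if the list was empty, tail
 * simply becomes the new list tail.
 */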
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long num_sync_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	/* we want to make sure that every time we switch from the sync
	 * list to the normal list, we unplug
	 */
	num_sync_run = 0;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	/*
	 * if we're doing the regular priority list, make sure we unplug
	 * for any high prio bios we've sent down
	 */
	if (pending_bios == &device->pending_bios && num_sync_run > 0) {
		num_sync_run = 0;
		blk_run_backing_dev(bdi, NULL);
	}

	while (pending) {
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		if (cur->bi_rw & REQ_SYNC)
			num_sync_run++;

		submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched()) {
			if (num_sync_run) {
				blk_run_backing_dev(bdi, NULL);
				num_sync_run = 0;
			}
			cond_resched();
		}

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched()) {
					if (num_sync_run) {
						blk_run_backing_dev(bdi, NULL);
						num_sync_run = 0;
					}
					cond_resched();
				}
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
	}

	if (num_sync_run) {
		num_sync_run = 0;
		blk_run_backing_dev(bdi, NULL);
	}
	/*
	 * IO has already been through a long path to get here.  Checksumming,
	 * async helper threads, perhaps compression.  We've done a pretty
	 * good job of collecting a batch of IO and should just unplug
	 * the device right away.
	 *
	 * This will help anyone who is waiting on the IO, they might have
	 * already unplugged, but managed to do so before the bio they
	 * cared about found its way down here.
	 */
	blk_run_backing_dev(bdi, NULL);

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	return 0;
}
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
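/*
 * pending_bios_fn is the btrfs_work handler wired up via
 * device->work.func = pending_bios_fn elsewhere in this file.  A rough
 * sketch of how a bio reaches it: the submission path queues the bio on
 * device->pending_bios (or ->pending_sync_bios for REQ_SYNC) and hands
 * device->work to the async-thread pool; a worker later invokes
 * pending_bios_fn(), which recovers the device with container_of() and
 * drains the lists in run_scheduled_bios().  (The submission side,
 * schedule_bio(), is outside this excerpt.)
 */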
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);
	char *name;

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}

		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name, path)) {
		name = kstrdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
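/*
 * Roughly, device_list_add() is the registration step behind scanning:
 * the first device seen for a new fsid allocates a btrfs_fs_devices and
 * hangs it off the global fs_uuids list; later devices with the same
 * fsid are matched by (devid, dev_item.uuid) via __find_device() and at
 * most get their cached path refreshed.  A higher superblock generation
 * also promotes that device into latest_devid/latest_trans.
 */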
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	mutex_lock(&orig->device_list_mutex);
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	mutex_lock(&uuid_mutex);
again:
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	mutex_unlock(&uuid_mutex);
	return 0;
}
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		device->bdev = NULL;
		device->writeable = 0;
		device->in_fs_metadata = 0;
	}
	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
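/*
 * The seed chain walked above is a simple singly linked list, e.g.:
 *
 *   sprouted fs -> seed 1 -> seed 2 -> NULL
 *        (fs_devices->seed)   (seed->seed)
 *
 * Once the sprout itself is fully closed (opened drops to zero), every
 * btrfs_fs_devices hanging off ->seed can be closed and freed too.
 */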
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev || !device->name)
			continue;

		bdev = blkdev_get_by_path(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			continue;
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		continue;

error_brelse:
		brelse(bh);
error_close:
		blkdev_put(bdev, flags);
	}
	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
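/*
 * Note the refcount pattern above: btrfs_open_devices() only performs a
 * real open the first time through; later callers on the same
 * fs_devices just bump ->opened, and __btrfs_close_devices() tears the
 * block devices down only once the matching number of closes has
 * brought ->opened back to zero.
 */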
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EINVAL;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk(KERN_INFO "device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
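/*
 * For reference, this is the entry point behind "btrfs device scan"
 * style probing: the caller hands in a block device path and open
 * flags, and on success *fs_devices_ret points at the (possibly freshly
 * created) btrfs_fs_devices for the fsid found in the superblock.  A
 * hypothetical caller might look like:
 *
 *	struct btrfs_fs_devices *fs_devices;
 *	ret = btrfs_scan_one_device("/dev/sdb", FMODE_READ, holder,
 *				    &fs_devices);
 *
 * where holder is whatever token the caller also uses with blkdev_get.
 */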
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
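/*
 * A worked example of the overlap arithmetic above, with start = 100
 * and end = 199 (a 100 byte range):
 *
 *   dev extent [ 50, 150)  ->  covers start only: adds 150 - 100 = 50
 *   dev extent [160, 180)  ->  inside the range:  adds 180 - 160 = 20
 *   dev extent [190, 300)  ->  covers end:        adds 199 - 190 + 1 = 10
 *
 * so *length ends up reporting 80 of the 100 bytes as used.
 */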
/*
 * find_free_dev_extent - find free space in the specified device
 * @trans:	transaction handle
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the max
 *		free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it.  But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size = 0;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = 1024 * 1024;

	if (root->fs_info->alloc_start + num_bytes <= search_end)
		search_start = max(root->fs_info->alloc_start, search_start);

	max_hole_start = search_start;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size.  Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
	}

	hole_size = search_end - search_start;
	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
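/*
 * Worked example of the search above: with dev items for devids 1, 2
 * and 5 present, the (u64)-1 offset search lands past the end of the
 * BTRFS_DEV_ITEMS_OBJECTID range, btrfs_previous_item() backs up to the
 * item with the largest offset (5), and *objectid becomes 5 + 1 = 6.
 * Devids are always handed out past the highest one ever used, with 1
 * as the fallback when no dev item exists yet.
 */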
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
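/*
 * The item written above lives in the chunk tree keyed as
 * (BTRFS_DEV_ITEMS_OBJECTID, BTRFS_DEV_ITEM_KEY, devid), so all dev
 * items sort together with devid as the offset.  That is exactly why
 * find_next_devid() above can discover the next free devid with a
 * single search to the last slot of that key range.
 */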
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	mutex_lock(&root->fs_info->volume_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		bdev = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		list_del_init(&device->dev_alloc_list);
		root->fs_info->fs_devices->rw_devices--;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	device->in_fs_metadata = 0;

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_init(&device->dev_list);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device->fs_devices->num_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev) {
		blkdev_put(device->bdev, device->mode);
		device->bdev = NULL;
		device->fs_devices->open_devices--;
	}

	num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);

	if (device->fs_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == device->fs_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = device->fs_devices->seed;
		device->fs_devices->seed = NULL;
		__btrfs_close_devices(device->fs_devices);
		free_fs_devices(device->fs_devices);
	}

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (device->writeable) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	kfree(device->name);
	kfree(device);
	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}
/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);
	list_splice_init(&fs_devices->devices, &seed_devices->devices);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
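/*
 * In short, sprouting pivots the device lists rather than copying
 * devices: before, fs_devices owns every (read-only seed) device;
 * after, a new in-memory btrfs_fs_devices ("seed_devices") owns them
 * and is chained at fs_devices->seed, while fs_devices itself is
 * emptied, given a brand new random fsid, and made ready to receive the
 * writable device being added.  The clone added to fs_uuids keeps the
 * original fsid findable for later scans of the seed.
 */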
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(root, path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device);

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	mutex_lock(&root->fs_info->volume_mutex);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		kfree(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		kfree(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(trans, root);
		BUG_ON(ret);
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		BUG_ON(ret);
		ret = btrfs_finish_sprout(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_device(trans, root, device);
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		ret = btrfs_relocate_sys_chunks(root);
		BUG_ON(ret);
	}
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	return ret;
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	goto out;
}

static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;

	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}

static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			       chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}

static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(IS_ERR(trans));

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}

static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0);

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(chunk_root, path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
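/*
 * div_factor(num, f) is num * f / 10, so div_factor(x, 1) as used below
 * means "10% of x": balance frees at most 10% of each device (further
 * capped to 1MB in this version), and chunk allocation caps a chunk at
 * 10% of the total writeable space.
 */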
int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one make some room on all the devices */
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;

		/* chunk zero is special */
		if (found_key.offset == 0)
			break;

		btrfs_release_path(chunk_root, path);
		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		BUG_ON(ret && ret != -ENOSPC);
		key.offset = found_key.offset - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->volume_mutex);
	return ret;
}
2119 * the new size, and then following the back refs to the chunks.
2120 * The chunk relocation code actually frees the device extent
2122 int btrfs_shrink_device(struct btrfs_device
*device
, u64 new_size
)
2124 struct btrfs_trans_handle
*trans
;
2125 struct btrfs_root
*root
= device
->dev_root
;
2126 struct btrfs_dev_extent
*dev_extent
= NULL
;
2127 struct btrfs_path
*path
;
2135 bool retried
= false;
2136 struct extent_buffer
*l
;
2137 struct btrfs_key key
;
2138 struct btrfs_super_block
*super_copy
= &root
->fs_info
->super_copy
;
2139 u64 old_total
= btrfs_super_total_bytes(super_copy
);
2140 u64 old_size
= device
->total_bytes
;
2141 u64 diff
= device
->total_bytes
- new_size
;
2143 if (new_size
>= device
->total_bytes
)
2146 path
= btrfs_alloc_path();
2154 device
->total_bytes
= new_size
;
2155 if (device
->writeable
)
2156 device
->fs_devices
->total_rw_bytes
-= diff
;
2157 unlock_chunks(root
);
2160 key
.objectid
= device
->devid
;
2161 key
.offset
= (u64
)-1;
2162 key
.type
= BTRFS_DEV_EXTENT_KEY
;
2165 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
2169 ret
= btrfs_previous_item(root
, path
, 0, key
.type
);
2174 btrfs_release_path(root
, path
);
2179 slot
= path
->slots
[0];
2180 btrfs_item_key_to_cpu(l
, &key
, path
->slots
[0]);
2182 if (key
.objectid
!= device
->devid
) {
2183 btrfs_release_path(root
, path
);
2187 dev_extent
= btrfs_item_ptr(l
, slot
, struct btrfs_dev_extent
);
2188 length
= btrfs_dev_extent_length(l
, dev_extent
);
2190 if (key
.offset
+ length
<= new_size
) {
2191 btrfs_release_path(root
, path
);
2195 chunk_tree
= btrfs_dev_extent_chunk_tree(l
, dev_extent
);
2196 chunk_objectid
= btrfs_dev_extent_chunk_objectid(l
, dev_extent
);
2197 chunk_offset
= btrfs_dev_extent_chunk_offset(l
, dev_extent
);
2198 btrfs_release_path(root
, path
);
2200 ret
= btrfs_relocate_chunk(root
, chunk_tree
, chunk_objectid
,
2202 if (ret
&& ret
!= -ENOSPC
)
2209 if (failed
&& !retried
) {
2213 } else if (failed
&& retried
) {
2217 device
->total_bytes
= old_size
;
2218 if (device
->writeable
)
2219 device
->fs_devices
->total_rw_bytes
+= diff
;
2220 unlock_chunks(root
);
2224 /* Shrinking succeeded, else we would be at "done". */
2225 trans
= btrfs_start_transaction(root
, 0);
2226 if (IS_ERR(trans
)) {
2227 ret
= PTR_ERR(trans
);
2233 device
->disk_total_bytes
= new_size
;
2234 /* Now btrfs_update_device() will change the on-disk size. */
2235 ret
= btrfs_update_device(trans
, device
);
2237 unlock_chunks(root
);
2238 btrfs_end_transaction(trans
, root
);
2241 WARN_ON(diff
> old_total
);
2242 btrfs_set_super_total_bytes(super_copy
, old_total
- diff
);
2243 unlock_chunks(root
);
2244 btrfs_end_transaction(trans
, root
);
2246 btrfs_free_path(path
);
2250 static int btrfs_add_system_chunk(struct btrfs_trans_handle
*trans
,
2251 struct btrfs_root
*root
,
2252 struct btrfs_key
*key
,
2253 struct btrfs_chunk
*chunk
, int item_size
)
2255 struct btrfs_super_block
*super_copy
= &root
->fs_info
->super_copy
;
2256 struct btrfs_disk_key disk_key
;
2260 array_size
= btrfs_super_sys_array_size(super_copy
);
2261 if (array_size
+ item_size
> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE
)
2264 ptr
= super_copy
->sys_chunk_array
+ array_size
;
2265 btrfs_cpu_key_to_disk(&disk_key
, key
);
2266 memcpy(ptr
, &disk_key
, sizeof(disk_key
));
2267 ptr
+= sizeof(disk_key
);
2268 memcpy(ptr
, chunk
, item_size
);
2269 item_size
+= sizeof(disk_key
);
2270 btrfs_set_super_sys_array_size(super_copy
, array_size
+ item_size
);
2274 static noinline u64
chunk_bytes_by_type(u64 type
, u64 calc_size
,
2275 int num_stripes
, int sub_stripes
)
2277 if (type
& (BTRFS_BLOCK_GROUP_RAID1
| BTRFS_BLOCK_GROUP_DUP
))
2279 else if (type
& BTRFS_BLOCK_GROUP_RAID10
)
2280 return calc_size
* (num_stripes
/ sub_stripes
);
2282 return calc_size
* num_stripes
;
2285 static int __btrfs_calc_nstripes(struct btrfs_fs_devices
*fs_devices
, u64 type
,
2286 int *num_stripes
, int *min_stripes
,
2293 if (type
& (BTRFS_BLOCK_GROUP_RAID0
)) {
2294 *num_stripes
= fs_devices
->rw_devices
;
2297 if (type
& (BTRFS_BLOCK_GROUP_DUP
)) {
2301 if (type
& (BTRFS_BLOCK_GROUP_RAID1
)) {
2302 if (fs_devices
->rw_devices
< 2)
2307 if (type
& (BTRFS_BLOCK_GROUP_RAID10
)) {
2308 *num_stripes
= fs_devices
->rw_devices
;
2309 if (*num_stripes
< 4)
2311 *num_stripes
&= ~(u32
)1;
2319 static u64
__btrfs_calc_stripe_size(struct btrfs_fs_devices
*fs_devices
,
2320 u64 proposed_size
, u64 type
,
2321 int num_stripes
, int small_stripe
)
2323 int min_stripe_size
= 1 * 1024 * 1024;
2324 u64 calc_size
= proposed_size
;
2325 u64 max_chunk_size
= calc_size
;
2328 if (type
& (BTRFS_BLOCK_GROUP_RAID1
|
2329 BTRFS_BLOCK_GROUP_DUP
|
2330 BTRFS_BLOCK_GROUP_RAID10
))
2333 if (type
& BTRFS_BLOCK_GROUP_DATA
) {
2334 max_chunk_size
= 10 * calc_size
;
2335 min_stripe_size
= 64 * 1024 * 1024;
2336 } else if (type
& BTRFS_BLOCK_GROUP_METADATA
) {
2337 max_chunk_size
= 256 * 1024 * 1024;
2338 min_stripe_size
= 32 * 1024 * 1024;
2339 } else if (type
& BTRFS_BLOCK_GROUP_SYSTEM
) {
2340 calc_size
= 8 * 1024 * 1024;
2341 max_chunk_size
= calc_size
* 2;
2342 min_stripe_size
= 1 * 1024 * 1024;
2345 /* we don't want a chunk larger than 10% of writeable space */
2346 max_chunk_size
= min(div_factor(fs_devices
->total_rw_bytes
, 1),
2349 if (calc_size
* num_stripes
> max_chunk_size
* ncopies
) {
2350 calc_size
= max_chunk_size
* ncopies
;
2351 do_div(calc_size
, num_stripes
);
2352 do_div(calc_size
, BTRFS_STRIPE_LEN
);
2353 calc_size
*= BTRFS_STRIPE_LEN
;
2356 /* we don't want tiny stripes */
2358 calc_size
= max_t(u64
, min_stripe_size
, calc_size
);
2361 * we're about to do_div by the BTRFS_STRIPE_LEN so lets make sure
2362 * we end up with something bigger than a stripe
2364 calc_size
= max_t(u64
, calc_size
, BTRFS_STRIPE_LEN
);
2366 do_div(calc_size
, BTRFS_STRIPE_LEN
);
2367 calc_size
*= BTRFS_STRIPE_LEN
;
2372 static struct map_lookup
*__shrink_map_lookup_stripes(struct map_lookup
*map
,
2375 struct map_lookup
*new;
2376 size_t len
= map_lookup_size(num_stripes
);
2378 BUG_ON(map
->num_stripes
< num_stripes
);
2380 if (map
->num_stripes
== num_stripes
)
2383 new = kmalloc(len
, GFP_NOFS
);
2385 /* just change map->num_stripes */
2386 map
->num_stripes
= num_stripes
;
2390 memcpy(new, map
, len
);
2391 new->num_stripes
= num_stripes
;
/*
 * helper to allocate device space from btrfs_device_info, in which we stored
 * the max free space information of every device.  It is used when we cannot
 * allocate chunks of the default size.
 *
 * With this helper, we can allocate a new chunk as large as possible.
 */
static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_devices *fs_devices,
				    struct btrfs_device_info *devices,
				    int nr_device, u64 type,
				    struct map_lookup **map_lookup,
				    int min_stripes, u64 *stripe_size)
{
	int i, index, sort_again = 0;
	int min_devices = min_stripes;

	u64 max_avail, min_free;
	struct map_lookup *map = *map_lookup;
	int ret;

	if (nr_device < min_stripes)
		return -ENOSPC;

	btrfs_descending_sort_devices(devices, nr_device);

	max_avail = devices[0].max_avail;
	if (!max_avail)
		return -ENOSPC;

	for (i = 0; i < nr_device; i++) {
		/*
		 * if dev_offset = 0, it means the free space of this device
		 * is less than what we need, and we didn't search the max
		 * avail extent on this device, so do it now.
		 */
		if (!devices[i].dev_offset) {
			ret = find_free_dev_extent(trans, devices[i].dev,
						   max_avail,
						   &devices[i].dev_offset,
						   &devices[i].max_avail);
			if (ret != 0 && ret != -ENOSPC)
				return ret;
			sort_again = 1;
		}
	}

	/* we updated the max avail free extent of each device, sort again */
	if (sort_again)
		btrfs_descending_sort_devices(devices, nr_device);

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_devices = 1;

	if (!devices[min_devices - 1].max_avail)
		return -ENOSPC;

	max_avail = devices[min_devices - 1].max_avail;
	if (type & BTRFS_BLOCK_GROUP_DUP)
		do_div(max_avail, 2);

	max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type,
					     min_stripes, 1);
	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = max_avail * 2;
	else
		min_free = max_avail;

	if (min_free > devices[min_devices - 1].max_avail)
		return -ENOSPC;

	map = __shrink_map_lookup_stripes(map, min_stripes);
	*stripe_size = max_avail;

	index = 0;
	for (i = 0; i < min_stripes; i++) {
		map->stripes[i].dev = devices[index].dev;
		map->stripes[i].physical = devices[index].dev_offset;
		if (type & BTRFS_BLOCK_GROUP_DUP) {
			i++;
			map->stripes[i].dev = devices[index].dev;
			map->stripes[i].physical = devices[index].dev_offset +
						   max_avail;
		}
		index++;
	}
	*map_lookup = map;

	return 0;
}
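/*
 * Illustrative layout (not from the original source): for a DUP chunk
 * with min_stripes == 2 built from one device whose largest free extent
 * starts at dev_offset, the loop above places both stripes on that same
 * device:
 *
 *	stripes[0].physical = dev_offset
 *	stripes[1].physical = dev_offset + max_avail
 *
 * i.e. the two copies sit side by side inside one free extent, which is
 * why max_avail was halved for DUP before the stripe size was computed.
 */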
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct map_lookup **map_ret,
			       u64 *num_bytes, u64 *stripe_size,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_device *device = NULL;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info;
	struct list_head private_devs;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 min_free;
	u64 avail;
	u64 dev_offset;
	int num_stripes;
	int min_stripes;
	int sub_stripes;
	int min_devices;	/* the min number of devices we need */
	int i;
	int ret;
	int index;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		WARN_ON(1);
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}
	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;

	ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes,
				    &min_stripes, &sub_stripes);
	if (ret)
		return ret;

	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	cur = fs_devices->alloc_list.next;
	index = 0;
	i = 0;

	calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type,
					     num_stripes, 0);

	if (type & BTRFS_BLOCK_GROUP_DUP) {
		min_free = calc_size * 2;
		min_devices = 1;
	} else {
		min_free = calc_size;
		min_devices = min_stripes;
	}

	INIT_LIST_HEAD(&private_devs);
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
		BUG_ON(!device->writeable);
		if (device->total_bytes > device->bytes_used)
			avail = device->total_bytes - device->bytes_used;
		else
			avail = 0;
		cur = cur->next;

		if (device->in_fs_metadata && avail >= min_free) {
			ret = find_free_dev_extent(trans, device, min_free,
						   &devices_info[i].dev_offset,
						   &devices_info[i].max_avail);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				map->stripes[index].dev = device;
				map->stripes[index].physical =
						devices_info[i].dev_offset;
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP) {
					map->stripes[index].dev = device;
					map->stripes[index].physical =
						devices_info[i].dev_offset +
						calc_size;
					index++;
				}
			} else if (ret != -ENOSPC)
				goto error;

			devices_info[i].dev = device;
			i++;
		} else if (device->in_fs_metadata &&
			   avail >= BTRFS_STRIPE_LEN) {
			devices_info[i].dev = device;
			devices_info[i].max_avail = avail;
			i++;
		}

		if (cur == &fs_devices->alloc_list)
			break;
	}

	list_splice(&private_devs, &fs_devices->alloc_list);
	if (index < num_stripes) {
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}

			map = __shrink_map_lookup_stripes(map, num_stripes);
		} else if (i >= min_devices) {
			ret = __btrfs_alloc_tiny_space(trans, fs_devices,
						       devices_info, i, type,
						       &map, min_stripes,
						       &calc_size);
			if (ret)
				goto error;
		} else {
			ret = -ENOSPC;
			goto error;
		}
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
	map->type = type;
	map->sub_stripes = sub_stripes;

	*map_ret = map;
	*stripe_size = calc_size;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 map->num_stripes, sub_stripes);

	trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes);

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		ret = -ENOMEM;
		goto error;
	}
	em->bdev = (struct block_device *)map;
	em->start = start;
	em->len = *num_bytes;
	em->block_start = 0;
	em->block_len = em->len;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, *num_bytes);
	BUG_ON(ret);

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		ret = btrfs_alloc_dev_extent(trans, device,
				info->chunk_root->root_key.objectid,
				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				start, dev_offset, calc_size);
		BUG_ON(ret);
		index++;
	}

	kfree(devices_info);
	return 0;

error:
	kfree(map);
	kfree(devices_info);
	return ret;
}
static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct map_lookup *map, u64 chunk_offset,
				u64 chunk_size, u64 stripe_size)
{
	u64 dev_offset;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
	int index = 0;
	int ret;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		device->bytes_used += stripe_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);
		index++;
	}

	index = 0;
	stripe = &chunk->stripe;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
		index++;
	}

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
					     item_size);
		BUG_ON(ret);
	}

	kfree(chunk);
	return 0;
}
/*
 * Chunk allocation falls into two parts.  The first part does the work
 * that makes the newly allocated chunk usable, but does not do any
 * operation that modifies the chunk tree.  The second part does the work
 * that requires modifying the chunk tree.  This division is important for
 * the bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;
	u64 chunk_size;
	u64 stripe_size;
	struct map_lookup *map;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	int ret;

	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &chunk_offset);
	if (ret)
		return ret;

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, type);
	if (ret)
		return ret;

	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);
	return 0;
}
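/*
 * A minimal sketch of the two-phase pattern described above, as it plays
 * out when bootstrapping a writeable fs on top of a seed device (see
 * init_first_rw_device() below): both chunks are fully set up in memory
 * first, and only then is the chunk tree itself touched.
 *
 *	__btrfs_alloc_chunk(...);	phase 1, metadata chunk
 *	__btrfs_alloc_chunk(...);	phase 1, system chunk
 *	__finish_chunk_alloc(...);	phase 2, insert metadata chunk item
 *	__finish_chunk_alloc(...);	phase 2, insert system chunk item
 */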
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 chunk_size;
	u64 sys_chunk_size;
	u64 stripe_size;
	u64 sys_stripe_size;
	u64 alloc_profile;
	struct map_lookup *map;
	struct map_lookup *sys_map;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	ret = find_next_chunk(fs_info->chunk_root,
			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
	BUG_ON(ret);

	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
			(fs_info->metadata_alloc_profile &
			 fs_info->avail_metadata_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, alloc_profile);
	BUG_ON(ret);

	sys_chunk_offset = chunk_offset + chunk_size;

	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
			(fs_info->system_alloc_profile &
			 fs_info->avail_system_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
				  &sys_chunk_size, &sys_stripe_size,
				  sys_chunk_offset, alloc_profile);
	BUG_ON(ret);

	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
	BUG_ON(ret);

	/*
	 * Modifying the chunk tree needs to allocate new blocks from both
	 * the system block group and the metadata block group.  So we can
	 * only do operations that require modifying the chunk tree after
	 * both block groups have been created.
	 */
	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);

	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
				   sys_chunk_offset, sys_chunk_size,
				   sys_stripe_size);
	BUG_ON(ret);
	return 0;
}
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	if (btrfs_test_opt(root, DEGRADED)) {
		free_extent_map(em);
		return 0;
	}

	map = (struct map_lookup *)em->bdev;
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}
	free_extent_map(em);
	return readonly;
}
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}
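/*
 * Example return values (illustrative): a RAID1 chunk with
 * map->num_stripes == 2 reports 2 copies, a RAID10 chunk reports
 * map->sub_stripes (normally 2), and RAID0 or a single chunk falls
 * through to 1.
 */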
static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;
	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}
	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_multi_bio **multi_ret,
			     int mirror_num, struct page *unplug_page)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_end_offset;
	u64 stripe_nr;
	u64 stripe_nr_orig;
	u64 stripe_nr_end;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	int num_stripes;
	int max_errors = 0;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
		stripes_allocated = 1;
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;

		atomic_set(&multi->error, 0);
	}

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	read_unlock(&em_tree->lock);

	if (!em && unplug_page) {
		kfree(multi);
		return 0;
	}

	if (!em) {
		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
		       (unsigned long long)logical,
		       (unsigned long long)*length);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & REQ_WRITE) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
			max_errors = 1;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
			max_errors = 1;
		}
	}
	if (rw & REQ_DISCARD) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			stripes_required = map->num_stripes;
		}
	}
	if (multi_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;
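	/*
	 * Worked example of the math above (illustrative, assuming the
	 * usual 64K stripe_len): for offset == 150K,
	 *
	 *	stripe_nr     = 150K / 64K = 2       (third stripe)
	 *	stripe_offset = 150K - 2 * 64K = 22K (offset in that stripe)
	 *
	 * do_div() is used because this is 64-bit division that must also
	 * work on 32-bit hosts; it divides in place and returns the
	 * remainder.
	 */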
	if (rw & REQ_DISCARD)
		*length = min_t(u64, em->len - offset, *length);
	else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			      BTRFS_BLOCK_GROUP_RAID1 |
			      BTRFS_BLOCK_GROUP_RAID10 |
			      BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!multi_ret && !unplug_page)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	stripe_nr_orig = stripe_nr;
	stripe_nr_end = (offset + *length + map->stripe_len - 1) &
			(~(map->stripe_len - 1));
	do_div(stripe_nr_end, map->stripe_len);
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + *length);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->num_stripes,
					    stripe_nr_end - stripe_nr_orig);
		stripe_index = do_div(stripe_nr, map->num_stripes);
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (unplug_page || (rw & (REQ_WRITE | REQ_DISCARD)))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, 0,
					    map->num_stripes,
					    current->pid % map->num_stripes);
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (REQ_WRITE | REQ_DISCARD))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (unplug_page || (rw & REQ_WRITE))
			num_stripes = map->sub_stripes;
		else if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->sub_stripes *
					    (stripe_nr_end - stripe_nr_orig),
					    map->num_stripes);
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes);
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);

	if (rw & REQ_DISCARD) {
		for (i = 0; i < num_stripes; i++) {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;

			if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
				u64 stripes;
				u32 last_stripe = 0;
				int j;

				div_u64_rem(stripe_nr_end - 1,
					    map->num_stripes,
					    &last_stripe);

				for (j = 0; j < map->num_stripes; j++) {
					u32 test;

					div_u64_rem(stripe_nr_end - 1 - j,
						    map->num_stripes, &test);
					if (test == stripe_index)
						break;
				}
				stripes = stripe_nr_end - 1 - j;
				do_div(stripes, map->num_stripes);
				multi->stripes[i].length = map->stripe_len *
					(stripes - stripe_nr + 1);

				if (i == 0) {
					multi->stripes[i].length -=
						stripe_offset;
					stripe_offset = 0;
				}
				if (stripe_index == last_stripe)
					multi->stripes[i].length -=
						stripe_end_offset;
			} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
				u64 stripes;
				int j;
				int factor = map->num_stripes /
					     map->sub_stripes;
				u32 last_stripe = 0;

				div_u64_rem(stripe_nr_end - 1,
					    factor, &last_stripe);
				last_stripe *= map->sub_stripes;

				for (j = 0; j < factor; j++) {
					u32 test;

					div_u64_rem(stripe_nr_end - 1 - j,
						    factor, &test);

					if (test ==
					    stripe_index / map->sub_stripes)
						break;
				}
				stripes = stripe_nr_end - 1 - j;
				do_div(stripes, factor);
				multi->stripes[i].length = map->stripe_len *
					(stripes - stripe_nr + 1);

				if (i < map->sub_stripes) {
					multi->stripes[i].length -=
						stripe_offset;
					if (i == map->sub_stripes - 1)
						stripe_offset = 0;
				}
				if (stripe_index >= last_stripe &&
				    stripe_index <= (last_stripe +
						     map->sub_stripes - 1)) {
					multi->stripes[i].length -=
						stripe_end_offset;
				}
			} else
				multi->stripes[i].length = *length;

			stripe_index++;
			if (stripe_index == map->num_stripes) {
				/* This could only happen for RAID0/10 */
				stripe_index = 0;
				stripe_nr++;
			}
		}
	} else {
		for (i = 0; i < num_stripes; i++) {
			if (unplug_page) {
				struct btrfs_device *device;
				struct backing_dev_info *bdi;

				device = map->stripes[stripe_index].dev;
				if (device->bdev) {
					bdi = blk_get_backing_dev_info(device->
								       bdev);
					if (bdi->unplug_io_fn)
						bdi->unplug_io_fn(bdi,
								  unplug_page);
				}
			} else {
				multi->stripes[i].physical =
					map->stripes[stripe_index].physical +
					stripe_offset +
					stripe_nr * map->stripe_len;
				multi->stripes[i].dev =
					map->stripes[stripe_index].dev;
			}
			stripe_index++;
		}
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
		multi->max_errors = max_errors;
	}
out:
	free_extent_map(em);
	return 0;
}
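/*
 * btrfs_map_block(), just below, is the public wrapper around the
 * function above.  A typical lookup (illustrative sketch, not from the
 * original source) maps a logical extent to its physical stripes:
 *
 *	u64 map_length = length;
 *	struct btrfs_multi_bio *multi = NULL;
 *
 *	ret = btrfs_map_block(map_tree, READ, logical, &map_length,
 *			      &multi, 0);
 *
 * On return, multi->stripes[0] names the device and physical offset
 * chosen for the read, and map_length may have been trimmed so the I/O
 * does not cross a stripe boundary.
 */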
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	int i, j, nr = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start != chunk_start);
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		do_div(length, map->num_stripes);

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
	BUG_ON(!buf);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		do_div(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			do_div(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		bytenr = chunk_start + stripe_nr * map->stripe_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = map->stripe_len;

	free_extent_map(em);
	return 0;
}
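/*
 * Worked example for the reverse mapping above (illustrative): in a
 * two-device RAID0 chunk starting at chunk_start, a physical byte that
 * sits stripe_nr stripes into stripe i maps back to the logical address
 *
 *	chunk_start + (stripe_nr * num_stripes + i) * stripe_len
 *
 * which is exactly the RAID0 branch in the loop; the buf[] scan only
 * de-duplicates logical addresses that several physical ranges
 * (DUP/RAID1) would otherwise report twice.
 */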
int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
		      u64 logical, struct page *page)
{
	u64 length = PAGE_CACHE_SIZE;
	return __btrfs_map_block(map_tree, READ, logical, &length,
				 NULL, 0, page);
}
static void end_bio_multi_stripe(struct bio *bio, int err)
{
	struct btrfs_multi_bio *multi = bio->bi_private;
	int is_orig_bio = 0;

	if (err)
		atomic_inc(&multi->error);

	if (bio == multi->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = multi->orig_bio;
		}
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&multi->error) > multi->max_errors) {
			err = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(multi);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};

/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline int schedule_bio(struct btrfs_root *root,
				 struct btrfs_device *device,
				 int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		submit_bio(rw, bio);
		bio_put(bio);
		return 0;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;
}
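/*
 * Design note on the two queues above: REQ_SYNC bios get their own
 * pending list (pending_sync_bios) so the per-device worker can push
 * latency-sensitive writes ahead of the bulk pending_bios queue; both
 * lists are drained by the same run_scheduled_bios() worker.
 */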
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
		       "len %llu\n", (unsigned long long)logical,
		       (unsigned long long)length,
		       (unsigned long long)map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	multi->orig_bio = first_bio;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
			bio_endio(bio, -EIO);
		}
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}
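/*
 * Illustrative fan-out (not from the original source): a write to a
 * two-mirror RAID1 chunk arrives here as one bio.  btrfs_map_block()
 * reports total_devs == 2, so the loop above sends a bio_clone() of
 * first_bio to stripe 0 and first_bio itself to stripe 1, each with
 * bi_sector rewritten to that stripe's physical offset.
 * end_bio_multi_stripe() then completes the original bio once
 * stripes_pending drops to zero.
 */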
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	device->fs_devices = fs_devices;
	device->missing = 1;
	fs_devices->num_devices++;
	fs_devices->missing_devices++;
	spin_lock_init(&device->io_lock);
	INIT_LIST_HEAD(&device->dev_alloc_list);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);

	return device;
}
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	mutex_lock(&uuid_mutex);

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices)) {
		ret = PTR_ERR(fs_devices);
		goto out;
	}

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret)
		goto out;

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device) {
			printk(KERN_WARNING "warning devid %llu missing\n",
			       (unsigned long long)devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
		} else if (!device->missing) {
			/*
			 * this happens when a device that was properly set
			 * up in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			root->fs_info->fs_devices->missing_devices++;
			device->missing = 1;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;
	if (device->writeable)
		device->fs_devices->total_rw_bytes += device->total_bytes;
	return 0;
}
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(sb, 0);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key); ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}
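/*
 * Layout of the sys_chunk_array parsed above (illustrative):
 *
 *	+------------------+--------------------------------+
 *	| btrfs_disk_key   | btrfs_chunk (incl. stripes)    | ...
 *	+------------------+--------------------------------+
 *
 * Each key must be a BTRFS_CHUNK_ITEM_KEY; the chunk portion is
 * variable-sized because btrfs_chunk_item_size() depends on
 * num_stripes, which is why the cursor advances by sizeof(*disk_key)
 * and then by the computed item size on every iteration.
 */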
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				if (ret)
					goto error;
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}