/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
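
/*
 * in-memory description of how a logical chunk is laid out across the
 * devices that back it: one btrfs_bio_stripe per copy or stripe.  These
 * live in the mapping tree's extent_maps, hung off em->bdev.
 */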
struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long num_sync_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	/* we want to make sure that every time we switch from the sync
	 * list to the normal list, we unplug
	 */
	num_sync_run = 0;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	/*
	 * if we're doing the regular priority list, make sure we unplug
	 * for any high prio bios we've sent down
	 */
	if (pending_bios == &device->pending_bios && num_sync_run > 0) {
		num_sync_run = 0;
		blk_run_backing_dev(bdi, NULL);
	}

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
		submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;

		if (bio_rw_flagged(cur, BIO_RW_SYNCIO))
			num_sync_run++;

		if (need_resched()) {
			if (num_sync_run) {
				blk_run_backing_dev(bdi, NULL);
				num_sync_run = 0;
			}
			cond_resched();
		}

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched()) {
					if (num_sync_run) {
						blk_run_backing_dev(bdi, NULL);
						num_sync_run = 0;
					}
					cond_resched();
				}
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
	}

	if (num_sync_run) {
		num_sync_run = 0;
		blk_run_backing_dev(bdi, NULL);
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

	/*
	 * IO has already been through a long path to get here.  Checksumming,
	 * async helper threads, perhaps compression.  We've done a pretty
	 * good job of collecting a batch of IO and should just unplug
	 * the device right away.
	 *
	 * This will help anyone who is waiting on the IO, they might have
	 * already unplugged, but managed to do so before the bio they
	 * cared about found its way down here.
	 */
	blk_run_backing_dev(bdi, NULL);
done:
	return 0;
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
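
/*
 * called during scan for each device super block we read: look up or
 * create the btrfs_fs_devices entry for this fsid and add the device to
 * its list.  The caller holds uuid_mutex.
 */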
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
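
/*
 * duplicate an fs_devices list and all of its device entries.  Used when
 * sprouting a seed filesystem so the seed's devices can live on under
 * the old fsid.
 */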
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	mutex_lock(&orig->device_list_mutex);
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
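
/*
 * close and free any scanned devices that the filesystem metadata does
 * not actually reference, walking the seed device lists as well
 */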
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	mutex_lock(&uuid_mutex);
again:
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata)
			continue;

		if (device->bdev) {
			close_bdev_exclusive(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	mutex_unlock(&uuid_mutex);
	return 0;
}

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev) {
			close_bdev_exclusive(device->bdev, device->mode);
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		device->bdev = NULL;
		device->writeable = 0;
		device->in_fs_metadata = 0;
	}
	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
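
/*
 * open every scanned device in the list, verify the devid and uuid
 * recorded in its super block, and remember the device with the highest
 * generation as latest_bdev
 */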
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = open_bdev_exclusive(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = le64_to_cpu(disk_super->dev_item.devid);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		continue;

error_brelse:
		brelse(bh);
error_close:
		close_bdev_exclusive(bdev, FMODE_READ);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
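
/*
 * read the super block of a single device and register it in the global
 * list of filesystems known to the module (fs_uuids)
 */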
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_exclusive(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = le64_to_cpu(disk_super->dev_item.devid);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk(KERN_INFO "device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	close_bdev_exclusive(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *max_avail)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;
	start_found = 0;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			start_found = 1;
	}
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;

			if (hole_size > *max_avail)
				*max_avail = hole_size;

			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	ret = 0;

error:
	btrfs_free_path(path);
	return ret;
}
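
/*
 * remove the dev extent item at @start on @device, shrinking
 * device->bytes_used by the extent's length
 */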
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		BUG_ON(ret);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		ret = 0;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return ret;
}

int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
	return ret;
}
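
/*
 * find the logical offset just past the last chunk allocated for
 * @objectid in the chunk tree; used when picking the start of the
 * next chunk
 */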
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
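
/*
 * devids are handed out sequentially; return one past the highest devid
 * currently recorded in the chunk tree's device items
 */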
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}
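
/*
 * remove a device from a mounted filesystem: shrink it to zero, delete
 * its dev item and in-memory entry, and wipe the btrfs magic from its
 * super block.  Passing "missing" as the path picks a device that the
 * metadata references but that was not found when the fs was opened.
 */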
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	mutex_lock(&root->fs_info->volume_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = open_bdev_exclusive(device_path, FMODE_READ,
					   root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EIO;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = le64_to_cpu(disk_super->dev_item.devid);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		list_del_init(&device->dev_alloc_list);
		root->fs_info->fs_devices->rw_devices--;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_brelse;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_brelse;

	device->in_fs_metadata = 0;

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_init(&device->dev_list);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device->fs_devices->num_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev) {
		close_bdev_exclusive(device->bdev, device->mode);
		device->bdev = NULL;
		device->fs_devices->open_devices--;
	}

	num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);

	if (device->fs_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == device->fs_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = device->fs_devices->seed;
		device->fs_devices->seed = NULL;
		__btrfs_close_devices(device->fs_devices);
		free_fs_devices(device->fs_devices);
	}

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (device->writeable) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	kfree(device->name);
	kfree(device);
	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		close_bdev_exclusive(bdev, FMODE_READ);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);
	list_splice_init(&fs_devices->devices, &seed_devices->devices);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(root, path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device);

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
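
/*
 * add a new device to a mounted filesystem.  If the fs was mounted as a
 * seed filesystem, this sprouts a new writable filesystem on top of it
 * first.
 */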
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	mutex_lock(&root->fs_info->volume_mutex);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 1);
	lock_chunks(root);

	device->barriers = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = 0;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(trans, root);
		BUG_ON(ret);
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		BUG_ON(ret);
		ret = btrfs_finish_sprout(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_device(trans, root, device);
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		ret = btrfs_relocate_sys_chunks(root);
		BUG_ON(ret);
	}
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	return ret;
error:
	close_bdev_exclusive(bdev, 0);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	goto out;
}
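
/*
 * write the in-memory sizes and io parameters of @device back to its
 * device item in the chunk tree
 */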
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}
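
/*
 * system chunks are mirrored in the super block's sys_chunk_array;
 * remove the array copy of the chunk being freed
 */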
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
			       u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
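
/*
 * empty a chunk and delete it: relocate all of its extents, then free
 * the device extents, the chunk and block group items, and the extent
 * mapping
 */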
static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	BUG_ON(ret);

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);

	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}

static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0);

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(chunk_root, path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
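
/*
 * balance rewrites every chunk.  Step one makes some room on each device
 * by shrinking and regrowing it; step two walks the chunk tree backwards
 * and relocates each chunk into the freshly freed space.
 */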
int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one make some room on all the devices */
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 1);
		BUG_ON(!trans);

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;

		chunk = btrfs_item_ptr(path->nodes[0],
				       path->slots[0],
				       struct btrfs_chunk);
		/* chunk zero is special */
		if (found_key.offset == 0)
			break;

		btrfs_release_path(chunk_root, path);
		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		BUG_ON(ret && ret != -ENOSPC);
		key.offset = found_key.offset - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->volume_mutex);
	return ret;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = device->total_bytes;
	u64 diff = device->total_bytes - new_size;

	if (new_size >= device->total_bytes)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;

	lock_chunks(root);

	device->total_bytes = new_size;
	if (device->writeable)
		device->fs_devices->total_rw_bytes -= diff;
	unlock_chunks(root);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(root, path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			btrfs_release_path(root, path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			btrfs_release_path(root, path);
			break;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
		key.offset -= 1;
	}

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		lock_chunks(root);

		device->total_bytes = old_size;
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
		unlock_chunks(root);
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto done;
	}
	lock_chunks(root);

	device->disk_total_bytes = new_size;
	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret) {
		unlock_chunks(root);
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
done:
	btrfs_free_path(path);
	return ret;
}
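
/*
 * append the key and item for a new system chunk to the super block's
 * sys_chunk_array
 */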
2117 static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
2118 struct btrfs_root *root,
2119 struct btrfs_key *key,
2120 struct btrfs_chunk *chunk, int item_size)
2122 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
2123 struct btrfs_disk_key disk_key;
2124 u32 array_size;
2125 u8 *ptr;
2127 array_size = btrfs_super_sys_array_size(super_copy);
2128 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
2129 return -EFBIG;
2131 ptr = super_copy->sys_chunk_array + array_size;
2132 btrfs_cpu_key_to_disk(&disk_key, key);
2133 memcpy(ptr, &disk_key, sizeof(disk_key));
2134 ptr += sizeof(disk_key);
2135 memcpy(ptr, chunk, item_size);
2136 item_size += sizeof(disk_key);
2137 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
2138 return 0;
2139 }
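/*
 * Worked layout of sys_chunk_array after the append above, e.g. with
 * two entries:
 *
 *   [disk_key 0][chunk item 0 incl. stripes][disk_key 1][chunk item 1 ...]
 *
 * Each entry costs sizeof(struct btrfs_disk_key) + item_size bytes and
 * the running total must stay within BTRFS_SYSTEM_CHUNK_ARRAY_SIZE,
 * hence the -EFBIG check before anything is copied.
 */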
2141 static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
2142 int num_stripes, int sub_stripes)
2143 {
2144 if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
2145 return calc_size;
2146 else if (type & BTRFS_BLOCK_GROUP_RAID10)
2147 return calc_size * (num_stripes / sub_stripes);
2148 else
2149 return calc_size * num_stripes;
2150 }
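/*
 * Worked examples for chunk_bytes_by_type() with calc_size = 1GB:
 *   RAID1 or DUP: the stripes are copies, so the chunk maps 1GB.
 *   RAID0 with 4 stripes: 4 * 1GB = 4GB of logical space.
 *   RAID10 with 4 stripes, 2 sub_stripes: (4 / 2) * 1GB = 2GB.
 */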
2152 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2153 struct btrfs_root *extent_root,
2154 struct map_lookup **map_ret,
2155 u64 *num_bytes, u64 *stripe_size,
2156 u64 start, u64 type)
2157 {
2158 struct btrfs_fs_info *info = extent_root->fs_info;
2159 struct btrfs_device *device = NULL;
2160 struct btrfs_fs_devices *fs_devices = info->fs_devices;
2161 struct list_head *cur;
2162 struct map_lookup *map = NULL;
2163 struct extent_map_tree *em_tree;
2164 struct extent_map *em;
2165 struct list_head private_devs;
2166 int min_stripe_size = 1 * 1024 * 1024;
2167 u64 calc_size = 1024 * 1024 * 1024;
2168 u64 max_chunk_size = calc_size;
2169 u64 min_free;
2170 u64 avail;
2171 u64 max_avail = 0;
2172 u64 dev_offset;
2173 int num_stripes = 1;
2174 int min_stripes = 1;
2175 int sub_stripes = 0;
2176 int looped = 0;
2177 int ret;
2178 int index;
2179 int stripe_len = 64 * 1024;
2181 if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
2182 (type & BTRFS_BLOCK_GROUP_DUP)) {
2183 WARN_ON(1);
2184 type &= ~BTRFS_BLOCK_GROUP_DUP;
2185 }
2186 if (list_empty(&fs_devices->alloc_list))
2187 return -ENOSPC;
2189 if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
2190 num_stripes = fs_devices->rw_devices;
2191 min_stripes = 2;
2192 }
2193 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
2194 num_stripes = 2;
2195 min_stripes = 2;
2196 }
2197 if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
2198 num_stripes = min_t(u64, 2, fs_devices->rw_devices);
2199 if (num_stripes < 2)
2200 return -ENOSPC;
2201 min_stripes = 2;
2202 }
2203 if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
2204 num_stripes = fs_devices->rw_devices;
2205 if (num_stripes < 4)
2206 return -ENOSPC;
2207 num_stripes &= ~(u32)1;
2208 sub_stripes = 2;
2209 min_stripes = 4;
2210 }
2212 if (type & BTRFS_BLOCK_GROUP_DATA) {
2213 max_chunk_size = 10 * calc_size;
2214 min_stripe_size = 64 * 1024 * 1024;
2215 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
2216 max_chunk_size = 256 * 1024 * 1024;
2217 min_stripe_size = 32 * 1024 * 1024;
2218 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
2219 calc_size = 8 * 1024 * 1024;
2220 max_chunk_size = calc_size * 2;
2221 min_stripe_size = 1 * 1024 * 1024;
2222 }
2224 /* we don't want a chunk larger than 10% of writeable space */
2225 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
2226 max_chunk_size);
2228 again:
2229 max_avail = 0;
2230 if (!map || map->num_stripes != num_stripes) {
2231 kfree(map);
2232 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2233 if (!map)
2234 return -ENOMEM;
2235 map->num_stripes = num_stripes;
2236 }
2238 if (calc_size * num_stripes > max_chunk_size) {
2239 calc_size = max_chunk_size;
2240 do_div(calc_size, num_stripes);
2241 do_div(calc_size, stripe_len);
2242 calc_size *= stripe_len;
2243 }
2244 /* we don't want tiny stripes */
2245 calc_size = max_t(u64, min_stripe_size, calc_size);
2247 do_div(calc_size, stripe_len);
2248 calc_size *= stripe_len;
2250 cur = fs_devices->alloc_list.next;
2251 index = 0;
2253 if (type & BTRFS_BLOCK_GROUP_DUP)
2254 min_free = calc_size * 2;
2255 else
2256 min_free = calc_size;
2258 /*
2259 * we add 1MB because we never use the first 1MB of the device, unless
2260 * we've looped, then we are likely allocating the maximum amount of
2261 * space left already
2262 */
2263 if (!looped)
2264 min_free += 1024 * 1024;
2266 INIT_LIST_HEAD(&private_devs);
2267 while (index < num_stripes) {
2268 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
2269 BUG_ON(!device->writeable);
2270 if (device->total_bytes > device->bytes_used)
2271 avail = device->total_bytes - device->bytes_used;
2272 else
2273 avail = 0;
2274 cur = cur->next;
2276 if (device->in_fs_metadata && avail >= min_free) {
2277 ret = find_free_dev_extent(trans, device,
2278 min_free, &dev_offset,
2279 &max_avail);
2280 if (ret == 0) {
2281 list_move_tail(&device->dev_alloc_list,
2282 &private_devs);
2283 map->stripes[index].dev = device;
2284 map->stripes[index].physical = dev_offset;
2285 index++;
2286 if (type & BTRFS_BLOCK_GROUP_DUP) {
2287 map->stripes[index].dev = device;
2288 map->stripes[index].physical =
2289 dev_offset + calc_size;
2290 index++;
2291 }
2292 }
2293 } else if (device->in_fs_metadata && avail > max_avail)
2294 max_avail = avail;
2295 if (cur == &fs_devices->alloc_list)
2296 break;
2297 }
2298 list_splice(&private_devs, &fs_devices->alloc_list);
2299 if (index < num_stripes) {
2300 if (index >= min_stripes) {
2301 num_stripes = index;
2302 if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
2303 num_stripes /= sub_stripes;
2304 num_stripes *= sub_stripes;
2305 }
2306 looped = 1;
2307 goto again;
2308 }
2309 if (!looped && max_avail > 0) {
2310 looped = 1;
2311 calc_size = max_avail;
2312 goto again;
2313 }
2314 kfree(map);
2315 return -ENOSPC;
2316 }
2317 map->sector_size = extent_root->sectorsize;
2318 map->stripe_len = stripe_len;
2319 map->io_align = stripe_len;
2320 map->io_width = stripe_len;
2321 map->type = type;
2322 map->num_stripes = num_stripes;
2323 map->sub_stripes = sub_stripes;
2325 *map_ret = map;
2326 *stripe_size = calc_size;
2327 *num_bytes = chunk_bytes_by_type(type, calc_size,
2328 num_stripes, sub_stripes);
2330 em = alloc_extent_map(GFP_NOFS);
2331 if (!em) {
2332 kfree(map);
2333 return -ENOMEM;
2334 }
2335 em->bdev = (struct block_device *)map;
2336 em->start = start;
2337 em->len = *num_bytes;
2338 em->block_start = 0;
2339 em->block_len = em->len;
2341 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
2342 write_lock(&em_tree->lock);
2343 ret = add_extent_mapping(em_tree, em);
2344 write_unlock(&em_tree->lock);
2345 BUG_ON(ret);
2346 free_extent_map(em);
2348 ret = btrfs_make_block_group(trans, extent_root, 0, type,
2349 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2350 start, *num_bytes);
2351 BUG_ON(ret);
2353 index = 0;
2354 while (index < map->num_stripes) {
2355 device = map->stripes[index].dev;
2356 dev_offset = map->stripes[index].physical;
2358 ret = btrfs_alloc_dev_extent(trans, device,
2359 info->chunk_root->root_key.objectid,
2360 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2361 start, dev_offset, calc_size);
2362 BUG_ON(ret);
2363 index++;
2364 }
2366 return 0;
2367 }
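/*
 * Example of the calc_size bounding above: for a data chunk
 * (max_chunk_size capped at 10% of total_rw_bytes) with 4 stripes and
 * a 2GB cap, calc_size becomes 2GB / 4 = 512MB, rounded down to a
 * multiple of the 64K stripe_len; each selected device then needs
 * min_free of calc_size (plus 1MB on the first pass, since the first
 * 1MB of every device is never used).
 */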
2369 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
2370 struct btrfs_root *extent_root,
2371 struct map_lookup *map, u64 chunk_offset,
2372 u64 chunk_size, u64 stripe_size)
2373 {
2374 u64 dev_offset;
2375 struct btrfs_key key;
2376 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2377 struct btrfs_device *device;
2378 struct btrfs_chunk *chunk;
2379 struct btrfs_stripe *stripe;
2380 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
2381 int index = 0;
2382 int ret;
2384 chunk = kzalloc(item_size, GFP_NOFS);
2385 if (!chunk)
2386 return -ENOMEM;
2388 index = 0;
2389 while (index < map->num_stripes) {
2390 device = map->stripes[index].dev;
2391 device->bytes_used += stripe_size;
2392 ret = btrfs_update_device(trans, device);
2393 BUG_ON(ret);
2394 index++;
2395 }
2397 index = 0;
2398 stripe = &chunk->stripe;
2399 while (index < map->num_stripes) {
2400 device = map->stripes[index].dev;
2401 dev_offset = map->stripes[index].physical;
2403 btrfs_set_stack_stripe_devid(stripe, device->devid);
2404 btrfs_set_stack_stripe_offset(stripe, dev_offset);
2405 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
2406 stripe++;
2407 index++;
2408 }
2410 btrfs_set_stack_chunk_length(chunk, chunk_size);
2411 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
2412 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
2413 btrfs_set_stack_chunk_type(chunk, map->type);
2414 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
2415 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
2416 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
2417 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
2418 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
2420 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2421 key.type = BTRFS_CHUNK_ITEM_KEY;
2422 key.offset = chunk_offset;
2424 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
2425 BUG_ON(ret);
2427 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2428 ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
2429 item_size);
2430 BUG_ON(ret);
2431 }
2432 kfree(chunk);
2433 return 0;
2434 }
2436 /*
2437 * Chunk allocation falls into two parts. The first part does the work
2438 * that makes the newly allocated chunk usable, but does not do any
2439 * operation that modifies the chunk tree. The second part does the
2440 * work that requires modifying the chunk tree. This division is
2441 * important for the bootstrap process of adding storage to a seed btrfs.
2442 */
2443 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2444 struct btrfs_root *extent_root, u64 type)
2445 {
2446 u64 chunk_offset;
2447 u64 chunk_size;
2448 u64 stripe_size;
2449 struct map_lookup *map;
2450 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2451 int ret;
2453 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2454 &chunk_offset);
2455 if (ret)
2456 return ret;
2458 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2459 &stripe_size, chunk_offset, type);
2460 if (ret)
2461 return ret;
2463 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2464 chunk_size, stripe_size);
2465 BUG_ON(ret);
2466 return 0;
2467 }
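/*
 * This is the regular allocation path: one chunk per call, both
 * phases inside one transaction.  init_first_rw_device() below is
 * the bootstrap variant; it must create a metadata and a system
 * chunk back to back before either can be committed to the chunk
 * tree, which is why the two phases are split out at all.
 */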
2469 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
2470 struct btrfs_root *root,
2471 struct btrfs_device *device)
2472 {
2473 u64 chunk_offset;
2474 u64 sys_chunk_offset;
2475 u64 chunk_size;
2476 u64 sys_chunk_size;
2477 u64 stripe_size;
2478 u64 sys_stripe_size;
2479 u64 alloc_profile;
2480 struct map_lookup *map;
2481 struct map_lookup *sys_map;
2482 struct btrfs_fs_info *fs_info = root->fs_info;
2483 struct btrfs_root *extent_root = fs_info->extent_root;
2484 int ret;
2486 ret = find_next_chunk(fs_info->chunk_root,
2487 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
2488 BUG_ON(ret);
2490 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
2491 (fs_info->metadata_alloc_profile &
2492 fs_info->avail_metadata_alloc_bits);
2493 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2495 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2496 &stripe_size, chunk_offset, alloc_profile);
2497 BUG_ON(ret);
2499 sys_chunk_offset = chunk_offset + chunk_size;
2501 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
2502 (fs_info->system_alloc_profile &
2503 fs_info->avail_system_alloc_bits);
2504 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2506 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
2507 &sys_chunk_size, &sys_stripe_size,
2508 sys_chunk_offset, alloc_profile);
2509 BUG_ON(ret);
2511 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
2512 BUG_ON(ret);
2514 /*
2515 * Modifying the chunk tree means allocating new blocks from both the
2516 * system block group and the metadata block group, so we can only
2517 * perform operations that modify the chunk tree after both block
2518 * groups have been created.
2519 */
2520 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2521 chunk_size, stripe_size);
2522 BUG_ON(ret);
2524 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
2525 sys_chunk_offset, sys_chunk_size,
2526 sys_stripe_size);
2527 BUG_ON(ret);
2528 return 0;
2529 }
2531 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
2532 {
2533 struct extent_map *em;
2534 struct map_lookup *map;
2535 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
2536 int readonly = 0;
2537 int i;
2539 read_lock(&map_tree->map_tree.lock);
2540 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2541 read_unlock(&map_tree->map_tree.lock);
2542 if (!em)
2543 return 1;
2545 if (btrfs_test_opt(root, DEGRADED)) {
2546 free_extent_map(em);
2547 return 0;
2548 }
2550 map = (struct map_lookup *)em->bdev;
2551 for (i = 0; i < map->num_stripes; i++) {
2552 if (!map->stripes[i].dev->writeable) {
2553 readonly = 1;
2554 break;
2555 }
2556 }
2557 free_extent_map(em);
2558 return readonly;
2559 }
2561 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
2562 {
2563 extent_map_tree_init(&tree->map_tree, GFP_NOFS);
2564 }
2566 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
2567 {
2568 struct extent_map *em;
2570 while (1) {
2571 write_lock(&tree->map_tree.lock);
2572 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
2573 if (em)
2574 remove_extent_mapping(&tree->map_tree, em);
2575 write_unlock(&tree->map_tree.lock);
2576 if (!em)
2577 break;
2578 kfree(em->bdev);
2579 /* once for us */
2580 free_extent_map(em);
2581 /* once for the tree */
2582 free_extent_map(em);
2583 }
2584 }
2586 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
2587 {
2588 struct extent_map *em;
2589 struct map_lookup *map;
2590 struct extent_map_tree *em_tree = &map_tree->map_tree;
2591 int ret;
2593 read_lock(&em_tree->lock);
2594 em = lookup_extent_mapping(em_tree, logical, len);
2595 read_unlock(&em_tree->lock);
2596 BUG_ON(!em);
2598 BUG_ON(em->start > logical || em->start + em->len < logical);
2599 map = (struct map_lookup *)em->bdev;
2600 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
2601 ret = map->num_stripes;
2602 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
2603 ret = map->sub_stripes;
2604 else
2605 ret = 1;
2606 free_extent_map(em);
2607 return ret;
2608 }
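/*
 * Worked examples: RAID1 and DUP report num_stripes (typically 2)
 * copies, RAID10 reports sub_stripes (2), and RAID0 or single report
 * 1; a caller can use this to bound mirror_num when retrying a
 * failed read against another copy.
 */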
2610 static int find_live_mirror(struct map_lookup *map, int first, int num,
2611 int optimal)
2612 {
2613 int i;
2614 if (map->stripes[optimal].dev->bdev)
2615 return optimal;
2616 for (i = first; i < first + num; i++) {
2617 if (map->stripes[i].dev->bdev)
2618 return i;
2619 }
2620 /* we couldn't find one that doesn't fail. Just return something
2621 * and the io error handling code will clean up eventually
2622 */
2623 return optimal;
2624 }
2626 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2627 u64 logical, u64 *length,
2628 struct btrfs_multi_bio **multi_ret,
2629 int mirror_num, struct page *unplug_page)
2630 {
2631 struct extent_map *em;
2632 struct map_lookup *map;
2633 struct extent_map_tree *em_tree = &map_tree->map_tree;
2634 u64 offset;
2635 u64 stripe_offset;
2636 u64 stripe_nr;
2637 int stripes_allocated = 8;
2638 int stripes_required = 1;
2639 int stripe_index;
2640 int i;
2641 int num_stripes;
2642 int max_errors = 0;
2643 struct btrfs_multi_bio *multi = NULL;
2645 if (multi_ret && !(rw & (1 << BIO_RW)))
2646 stripes_allocated = 1;
2647 again:
2648 if (multi_ret) {
2649 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
2650 GFP_NOFS);
2651 if (!multi)
2652 return -ENOMEM;
2654 atomic_set(&multi->error, 0);
2655 }
2657 read_lock(&em_tree->lock);
2658 em = lookup_extent_mapping(em_tree, logical, *length);
2659 read_unlock(&em_tree->lock);
2661 if (!em && unplug_page) {
2662 kfree(multi);
2663 return 0;
2664 }
2666 if (!em) {
2667 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
2668 (unsigned long long)logical,
2669 (unsigned long long)*length);
2670 BUG();
2671 }
2673 BUG_ON(em->start > logical || em->start + em->len < logical);
2674 map = (struct map_lookup *)em->bdev;
2675 offset = logical - em->start;
2677 if (mirror_num > map->num_stripes)
2678 mirror_num = 0;
2680 /* if our multi bio struct is too small, back off and try again */
2681 if (rw & (1 << BIO_RW)) {
2682 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
2683 BTRFS_BLOCK_GROUP_DUP)) {
2684 stripes_required = map->num_stripes;
2685 max_errors = 1;
2686 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2687 stripes_required = map->sub_stripes;
2688 max_errors = 1;
2689 }
2690 }
2691 if (multi_ret && (rw & (1 << BIO_RW)) &&
2692 stripes_allocated < stripes_required) {
2693 stripes_allocated = map->num_stripes;
2694 free_extent_map(em);
2695 kfree(multi);
2696 goto again;
2697 }
2698 stripe_nr = offset;
2699 /*
2700 * stripe_nr counts the total number of stripes we have to stride
2701 * to get to this block
2702 */
2703 do_div(stripe_nr, map->stripe_len);
2705 stripe_offset = stripe_nr * map->stripe_len;
2706 BUG_ON(offset < stripe_offset);
2708 /* stripe_offset is the offset of this block in its stripe */
2709 stripe_offset = offset - stripe_offset;
2711 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2712 BTRFS_BLOCK_GROUP_RAID10 |
2713 BTRFS_BLOCK_GROUP_DUP)) {
2714 /* we limit the length of each bio to what fits in a stripe */
2715 *length = min_t(u64, em->len - offset,
2716 map->stripe_len - stripe_offset);
2717 } else {
2718 *length = em->len - offset;
2719 }
2721 if (!multi_ret && !unplug_page)
2722 goto out;
2724 num_stripes = 1;
2725 stripe_index = 0;
2726 if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
2727 if (unplug_page || (rw & (1 << BIO_RW)))
2728 num_stripes = map->num_stripes;
2729 else if (mirror_num)
2730 stripe_index = mirror_num - 1;
2731 else {
2732 stripe_index = find_live_mirror(map, 0,
2733 map->num_stripes,
2734 current->pid % map->num_stripes);
2735 }
2737 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
2738 if (rw & (1 << BIO_RW))
2739 num_stripes = map->num_stripes;
2740 else if (mirror_num)
2741 stripe_index = mirror_num - 1;
2743 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2744 int factor = map->num_stripes / map->sub_stripes;
2746 stripe_index = do_div(stripe_nr, factor);
2747 stripe_index *= map->sub_stripes;
2749 if (unplug_page || (rw & (1 << BIO_RW)))
2750 num_stripes = map->sub_stripes;
2751 else if (mirror_num)
2752 stripe_index += mirror_num - 1;
2753 else {
2754 stripe_index = find_live_mirror(map, stripe_index,
2755 map->sub_stripes, stripe_index +
2756 current->pid % map->sub_stripes);
2757 }
2758 } else {
2759 /*
2760 * after this do_div call, stripe_nr is the number of stripes
2761 * on this device we have to walk to find the data, and
2762 * stripe_index is the number of our device in the stripe array
2763 */
2764 stripe_index = do_div(stripe_nr, map->num_stripes);
2765 }
2766 BUG_ON(stripe_index >= map->num_stripes);
2768 for (i = 0; i < num_stripes; i++) {
2769 if (unplug_page) {
2770 struct btrfs_device *device;
2771 struct backing_dev_info *bdi;
2773 device = map->stripes[stripe_index].dev;
2774 if (device->bdev) {
2775 bdi = blk_get_backing_dev_info(device->bdev);
2776 if (bdi->unplug_io_fn)
2777 bdi->unplug_io_fn(bdi, unplug_page);
2778 }
2779 } else {
2780 multi->stripes[i].physical =
2781 map->stripes[stripe_index].physical +
2782 stripe_offset + stripe_nr * map->stripe_len;
2783 multi->stripes[i].dev = map->stripes[stripe_index].dev;
2784 }
2785 stripe_index++;
2786 }
2787 if (multi_ret) {
2788 *multi_ret = multi;
2789 multi->num_stripes = num_stripes;
2790 multi->max_errors = max_errors;
2791 }
2792 out:
2793 free_extent_map(em);
2794 return 0;
2795 }
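/*
 * A worked mapping example: on a RAID0 chunk with two stripes and a
 * 64K stripe_len, an offset of 200K into the chunk gives
 * stripe_nr = 200K / 64K = 3 and stripe_offset = 200K - 3 * 64K = 8K;
 * do_div(stripe_nr, 2) then leaves stripe_nr = 1 with remainder
 * stripe_index = 1, so the io goes to
 * stripes[1].physical + 1 * 64K + 8K.
 */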
2797 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2798 u64 logical, u64 *length,
2799 struct btrfs_multi_bio **multi_ret, int mirror_num)
2800 {
2801 return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
2802 mirror_num, NULL);
2803 }
2805 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
2806 u64 chunk_start, u64 physical, u64 devid,
2807 u64 **logical, int *naddrs, int *stripe_len)
2808 {
2809 struct extent_map_tree *em_tree = &map_tree->map_tree;
2810 struct extent_map *em;
2811 struct map_lookup *map;
2812 u64 *buf;
2813 u64 bytenr;
2814 u64 length;
2815 u64 stripe_nr;
2816 int i, j, nr = 0;
2818 read_lock(&em_tree->lock);
2819 em = lookup_extent_mapping(em_tree, chunk_start, 1);
2820 read_unlock(&em_tree->lock);
2822 BUG_ON(!em || em->start != chunk_start);
2823 map = (struct map_lookup *)em->bdev;
2825 length = em->len;
2826 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
2827 do_div(length, map->num_stripes / map->sub_stripes);
2828 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
2829 do_div(length, map->num_stripes);
2831 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
2832 BUG_ON(!buf);
2834 for (i = 0; i < map->num_stripes; i++) {
2835 if (devid && map->stripes[i].dev->devid != devid)
2836 continue;
2837 if (map->stripes[i].physical > physical ||
2838 map->stripes[i].physical + length <= physical)
2839 continue;
2841 stripe_nr = physical - map->stripes[i].physical;
2842 do_div(stripe_nr, map->stripe_len);
2844 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2845 stripe_nr = stripe_nr * map->num_stripes + i;
2846 do_div(stripe_nr, map->sub_stripes);
2847 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
2848 stripe_nr = stripe_nr * map->num_stripes + i;
2849 }
2850 bytenr = chunk_start + stripe_nr * map->stripe_len;
2851 WARN_ON(nr >= map->num_stripes);
2852 for (j = 0; j < nr; j++) {
2853 if (buf[j] == bytenr)
2854 break;
2855 }
2856 if (j == nr) {
2857 WARN_ON(nr >= map->num_stripes);
2858 buf[nr++] = bytenr;
2859 }
2860 }
2862 *logical = buf;
2863 *naddrs = nr;
2864 *stripe_len = map->stripe_len;
2866 free_extent_map(em);
2867 return 0;
2868 }
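/*
 * Reverse-mapping example: on the same two-stripe RAID0 chunk, a
 * physical address 64K into stripe 1 gives stripe_nr = 1, then
 * stripe_nr * num_stripes + i = 1 * 2 + 1 = 3, so the logical address
 * reported is chunk_start + 3 * 64K; the inverse of the
 * __btrfs_map_block() math above.
 */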
2870 int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
2871 u64 logical, struct page *page)
2872 {
2873 u64 length = PAGE_CACHE_SIZE;
2874 return __btrfs_map_block(map_tree, READ, logical, &length,
2875 NULL, 0, page);
2876 }
2878 static void end_bio_multi_stripe(struct bio *bio, int err)
2879 {
2880 struct btrfs_multi_bio *multi = bio->bi_private;
2881 int is_orig_bio = 0;
2883 if (err)
2884 atomic_inc(&multi->error);
2886 if (bio == multi->orig_bio)
2887 is_orig_bio = 1;
2889 if (atomic_dec_and_test(&multi->stripes_pending)) {
2890 if (!is_orig_bio) {
2891 bio_put(bio);
2892 bio = multi->orig_bio;
2893 }
2894 bio->bi_private = multi->private;
2895 bio->bi_end_io = multi->end_io;
2896 /* only send an error to the higher layers if it is
2897 * beyond the tolerance of the multi-bio
2898 */
2899 if (atomic_read(&multi->error) > multi->max_errors) {
2900 err = -EIO;
2901 } else if (err) {
2902 /*
2903 * this bio is actually up to date, we didn't
2904 * go over the max number of errors
2905 */
2906 set_bit(BIO_UPTODATE, &bio->bi_flags);
2907 err = 0;
2908 }
2909 kfree(multi);
2911 bio_endio(bio, err);
2912 } else if (!is_orig_bio) {
2913 bio_put(bio);
2914 }
2915 }
2917 struct async_sched {
2918 struct bio *bio;
2919 int rw;
2920 struct btrfs_fs_info *info;
2921 struct btrfs_work work;
2922 };
2924 /*
2925 * see run_scheduled_bios for a description of why bios are collected for
2926 * async submit.
2927 *
2928 * This will add one bio to the pending list for a device and make sure
2929 * the work struct is scheduled.
2930 */
2931 static noinline int schedule_bio(struct btrfs_root *root,
2932 struct btrfs_device *device,
2933 int rw, struct bio *bio)
2934 {
2935 int should_queue = 1;
2936 struct btrfs_pending_bios *pending_bios;
2938 /* don't bother with additional async steps for reads, right now */
2939 if (!(rw & (1 << BIO_RW))) {
2940 bio_get(bio);
2941 submit_bio(rw, bio);
2942 bio_put(bio);
2943 return 0;
2944 }
2946 /*
2947 * nr_async_bios allows us to reliably return congestion to the
2948 * higher layers. Otherwise, the async bio makes it appear we have
2949 * made progress against dirty pages when we've really just put it
2950 * on a queue for later
2951 */
2952 atomic_inc(&root->fs_info->nr_async_bios);
2953 WARN_ON(bio->bi_next);
2954 bio->bi_next = NULL;
2955 bio->bi_rw |= rw;
2957 spin_lock(&device->io_lock);
2958 if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
2959 pending_bios = &device->pending_sync_bios;
2960 else
2961 pending_bios = &device->pending_bios;
2963 if (pending_bios->tail)
2964 pending_bios->tail->bi_next = bio;
2966 pending_bios->tail = bio;
2967 if (!pending_bios->head)
2968 pending_bios->head = bio;
2969 if (device->running_pending)
2970 should_queue = 0;
2972 spin_unlock(&device->io_lock);
2974 if (should_queue)
2975 btrfs_queue_worker(&root->fs_info->submit_workers,
2976 &device->work);
2977 return 0;
2978 }
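/*
 * Note on the queueing above: sync bios get their own
 * pending_sync_bios list so run_scheduled_bios can service them ahead
 * of bulk writeback, and if the device's worker is already running
 * (running_pending set) the bio is merely appended instead of
 * requeueing the work item.
 */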
2980 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
2981 int mirror_num, int async_submit)
2982 {
2983 struct btrfs_mapping_tree *map_tree;
2984 struct btrfs_device *dev;
2985 struct bio *first_bio = bio;
2986 u64 logical = (u64)bio->bi_sector << 9;
2987 u64 length = 0;
2988 u64 map_length;
2989 struct btrfs_multi_bio *multi = NULL;
2990 int ret;
2991 int dev_nr = 0;
2992 int total_devs = 1;
2994 length = bio->bi_size;
2995 map_tree = &root->fs_info->mapping_tree;
2996 map_length = length;
2998 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
2999 mirror_num);
3000 BUG_ON(ret);
3002 total_devs = multi->num_stripes;
3003 if (map_length < length) {
3004 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
3005 "len %llu\n", (unsigned long long)logical,
3006 (unsigned long long)length,
3007 (unsigned long long)map_length);
3008 BUG();
3009 }
3010 multi->end_io = first_bio->bi_end_io;
3011 multi->private = first_bio->bi_private;
3012 multi->orig_bio = first_bio;
3013 atomic_set(&multi->stripes_pending, multi->num_stripes);
3015 while (dev_nr < total_devs) {
3016 if (total_devs > 1) {
3017 if (dev_nr < total_devs - 1) {
3018 bio = bio_clone(first_bio, GFP_NOFS);
3019 BUG_ON(!bio);
3020 } else {
3021 bio = first_bio;
3022 }
3023 bio->bi_private = multi;
3024 bio->bi_end_io = end_bio_multi_stripe;
3025 }
3026 bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
3027 dev = multi->stripes[dev_nr].dev;
3028 BUG_ON(rw == WRITE && !dev->writeable);
3029 if (dev && dev->bdev) {
3030 bio->bi_bdev = dev->bdev;
3031 if (async_submit)
3032 schedule_bio(root, dev, rw, bio);
3033 else
3034 submit_bio(rw, bio);
3035 } else {
3036 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
3037 bio->bi_sector = logical >> 9;
3038 bio_endio(bio, -EIO);
3039 }
3040 dev_nr++;
3041 }
3042 if (total_devs == 1)
3043 kfree(multi);
3044 return 0;
3045 }
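/*
 * Fan-out example: a write to a two-stripe RAID1 chunk yields
 * total_devs == 2, so the loop above clones first_bio once; every
 * clone completes through end_bio_multi_stripe(), and the original
 * bio is only ended once stripes_pending reaches zero, with -EIO
 * reported only if more than max_errors stripes failed.
 */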
3047 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
3048 u8 *uuid, u8 *fsid)
3049 {
3050 struct btrfs_device *device;
3051 struct btrfs_fs_devices *cur_devices;
3053 cur_devices = root->fs_info->fs_devices;
3054 while (cur_devices) {
3055 if (!fsid ||
3056 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
3057 device = __find_device(&cur_devices->devices,
3058 devid, uuid);
3059 if (device)
3060 return device;
3061 }
3062 cur_devices = cur_devices->seed;
3063 }
3064 return NULL;
3065 }
3067 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
3068 u64 devid, u8 *dev_uuid)
3069 {
3070 struct btrfs_device *device;
3071 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
3073 device = kzalloc(sizeof(*device), GFP_NOFS);
3074 if (!device)
3075 return NULL;
3076 list_add(&device->dev_list,
3077 &fs_devices->devices);
3078 device->barriers = 1;
3079 device->dev_root = root->fs_info->dev_root;
3080 device->devid = devid;
3081 device->work.func = pending_bios_fn;
3082 device->fs_devices = fs_devices;
3083 fs_devices->num_devices++;
3084 spin_lock_init(&device->io_lock);
3085 INIT_LIST_HEAD(&device->dev_alloc_list);
3086 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
3087 return device;
3088 }
3090 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
3091 struct extent_buffer *leaf,
3092 struct btrfs_chunk *chunk)
3093 {
3094 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3095 struct map_lookup *map;
3096 struct extent_map *em;
3097 u64 logical;
3098 u64 length;
3099 u64 devid;
3100 u8 uuid[BTRFS_UUID_SIZE];
3101 int num_stripes;
3102 int ret;
3103 int i;
3105 logical = key->offset;
3106 length = btrfs_chunk_length(leaf, chunk);
3108 read_lock(&map_tree->map_tree.lock);
3109 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
3110 read_unlock(&map_tree->map_tree.lock);
3112 /* already mapped? */
3113 if (em && em->start <= logical && em->start + em->len > logical) {
3114 free_extent_map(em);
3115 return 0;
3116 } else if (em) {
3117 free_extent_map(em);
3118 }
3120 em = alloc_extent_map(GFP_NOFS);
3121 if (!em)
3122 return -ENOMEM;
3123 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3124 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3125 if (!map) {
3126 free_extent_map(em);
3127 return -ENOMEM;
3128 }
3130 em->bdev = (struct block_device *)map;
3131 em->start = logical;
3132 em->len = length;
3133 em->block_start = 0;
3134 em->block_len = em->len;
3136 map->num_stripes = num_stripes;
3137 map->io_width = btrfs_chunk_io_width(leaf, chunk);
3138 map->io_align = btrfs_chunk_io_align(leaf, chunk);
3139 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
3140 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
3141 map->type = btrfs_chunk_type(leaf, chunk);
3142 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
3143 for (i = 0; i < num_stripes; i++) {
3144 map->stripes[i].physical =
3145 btrfs_stripe_offset_nr(leaf, chunk, i);
3146 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
3147 read_extent_buffer(leaf, uuid, (unsigned long)
3148 btrfs_stripe_dev_uuid_nr(chunk, i),
3149 BTRFS_UUID_SIZE);
3150 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
3151 NULL);
3152 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
3153 kfree(map);
3154 free_extent_map(em);
3155 return -EIO;
3156 }
3157 if (!map->stripes[i].dev) {
3158 map->stripes[i].dev =
3159 add_missing_dev(root, devid, uuid);
3160 if (!map->stripes[i].dev) {
3161 kfree(map);
3162 free_extent_map(em);
3163 return -EIO;
3164 }
3165 }
3166 map->stripes[i].dev->in_fs_metadata = 1;
3167 }
3169 write_lock(&map_tree->map_tree.lock);
3170 ret = add_extent_mapping(&map_tree->map_tree, em);
3171 write_unlock(&map_tree->map_tree.lock);
3172 BUG_ON(ret);
3173 free_extent_map(em);
3175 return 0;
3176 }
3178 static int fill_device_from_item(struct extent_buffer *leaf,
3179 struct btrfs_dev_item *dev_item,
3180 struct btrfs_device *device)
3181 {
3182 unsigned long ptr;
3184 device->devid = btrfs_device_id(leaf, dev_item);
3185 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
3186 device->total_bytes = device->disk_total_bytes;
3187 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
3188 device->type = btrfs_device_type(leaf, dev_item);
3189 device->io_align = btrfs_device_io_align(leaf, dev_item);
3190 device->io_width = btrfs_device_io_width(leaf, dev_item);
3191 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
3193 ptr = (unsigned long)btrfs_device_uuid(dev_item);
3194 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
3196 return 0;
3197 }
3199 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
3200 {
3201 struct btrfs_fs_devices *fs_devices;
3202 int ret;
3204 mutex_lock(&uuid_mutex);
3206 fs_devices = root->fs_info->fs_devices->seed;
3207 while (fs_devices) {
3208 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
3209 ret = 0;
3210 goto out;
3211 }
3212 fs_devices = fs_devices->seed;
3213 }
3215 fs_devices = find_fsid(fsid);
3216 if (!fs_devices) {
3217 ret = -ENOENT;
3218 goto out;
3219 }
3221 fs_devices = clone_fs_devices(fs_devices);
3222 if (IS_ERR(fs_devices)) {
3223 ret = PTR_ERR(fs_devices);
3224 goto out;
3225 }
3227 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
3228 root->fs_info->bdev_holder);
3229 if (ret)
3230 goto out;
3232 if (!fs_devices->seeding) {
3233 __btrfs_close_devices(fs_devices);
3234 free_fs_devices(fs_devices);
3235 ret = -EINVAL;
3236 goto out;
3237 }
3239 fs_devices->seed = root->fs_info->fs_devices->seed;
3240 root->fs_info->fs_devices->seed = fs_devices;
3241 out:
3242 mutex_unlock(&uuid_mutex);
3243 return ret;
3244 }
3246 static int read_one_dev(struct btrfs_root *root,
3247 struct extent_buffer *leaf,
3248 struct btrfs_dev_item *dev_item)
3249 {
3250 struct btrfs_device *device;
3251 u64 devid;
3252 int ret;
3253 u8 fs_uuid[BTRFS_UUID_SIZE];
3254 u8 dev_uuid[BTRFS_UUID_SIZE];
3256 devid = btrfs_device_id(leaf, dev_item);
3257 read_extent_buffer(leaf, dev_uuid,
3258 (unsigned long)btrfs_device_uuid(dev_item),
3259 BTRFS_UUID_SIZE);
3260 read_extent_buffer(leaf, fs_uuid,
3261 (unsigned long)btrfs_device_fsid(dev_item),
3262 BTRFS_UUID_SIZE);
3264 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
3265 ret = open_seed_devices(root, fs_uuid);
3266 if (ret && !btrfs_test_opt(root, DEGRADED))
3267 return ret;
3268 }
3270 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
3271 if (!device || !device->bdev) {
3272 if (!btrfs_test_opt(root, DEGRADED))
3273 return -EIO;
3275 if (!device) {
3276 printk(KERN_WARNING "warning devid %llu missing\n",
3277 (unsigned long long)devid);
3278 device = add_missing_dev(root, devid, dev_uuid);
3279 if (!device)
3280 return -ENOMEM;
3281 }
3282 }
3284 if (device->fs_devices != root->fs_info->fs_devices) {
3285 BUG_ON(device->writeable);
3286 if (device->generation !=
3287 btrfs_device_generation(leaf, dev_item))
3288 return -EINVAL;
3289 }
3291 fill_device_from_item(leaf, dev_item, device);
3292 device->dev_root = root->fs_info->dev_root;
3293 device->in_fs_metadata = 1;
3294 if (device->writeable)
3295 device->fs_devices->total_rw_bytes += device->total_bytes;
3296 ret = 0;
3297 return ret;
3298 }
3300 int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
3301 {
3302 struct btrfs_dev_item *dev_item;
3304 dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
3305 dev_item);
3306 return read_one_dev(root, buf, dev_item);
3307 }
3309 int btrfs_read_sys_array(struct btrfs_root *root)
3310 {
3311 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
3312 struct extent_buffer *sb;
3313 struct btrfs_disk_key *disk_key;
3314 struct btrfs_chunk *chunk;
3315 u8 *ptr;
3316 unsigned long sb_ptr;
3317 int ret = 0;
3318 u32 num_stripes;
3319 u32 array_size;
3320 u32 len = 0;
3321 u32 cur;
3322 struct btrfs_key key;
3324 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
3325 BTRFS_SUPER_INFO_SIZE);
3326 if (!sb)
3327 return -ENOMEM;
3328 btrfs_set_buffer_uptodate(sb);
3329 btrfs_set_buffer_lockdep_class(sb, 0);
3331 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
3332 array_size = btrfs_super_sys_array_size(super_copy);
3334 ptr = super_copy->sys_chunk_array;
3335 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
3336 cur = 0;
3338 while (cur < array_size) {
3339 disk_key = (struct btrfs_disk_key *)ptr;
3340 btrfs_disk_key_to_cpu(&key, disk_key);
3342 len = sizeof(*disk_key); ptr += len;
3343 sb_ptr += len;
3344 cur += len;
3346 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
3347 chunk = (struct btrfs_chunk *)sb_ptr;
3348 ret = read_one_chunk(root, &key, sb, chunk);
3349 if (ret)
3350 break;
3351 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
3352 len = btrfs_chunk_item_size(num_stripes);
3353 } else {
3354 ret = -EIO;
3355 break;
3356 }
3357 ptr += len;
3358 sb_ptr += len;
3359 cur += len;
3360 }
3361 free_extent_buffer(sb);
3362 return ret;
3363 }
3365 int btrfs_read_chunk_tree(struct btrfs_root *root)
3366 {
3367 struct btrfs_path *path;
3368 struct extent_buffer *leaf;
3369 struct btrfs_key key;
3370 struct btrfs_key found_key;
3371 int ret;
3372 int slot;
3374 root = root->fs_info->chunk_root;
3376 path = btrfs_alloc_path();
3377 if (!path)
3378 return -ENOMEM;
3380 /* first we search for all of the device items, and then we
3381 * read in all of the chunk items. This way we can create chunk
3382 * mappings that reference all of the devices that are found
3383 */
3384 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
3385 key.offset = 0;
3386 key.type = 0;
3387 again:
3388 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3389 while (1) {
3390 leaf = path->nodes[0];
3391 slot = path->slots[0];
3392 if (slot >= btrfs_header_nritems(leaf)) {
3393 ret = btrfs_next_leaf(root, path);
3394 if (ret == 0)
3395 continue;
3396 if (ret < 0)
3397 goto error;
3398 break;
3399 }
3400 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3401 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3402 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
3403 break;
3404 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
3405 struct btrfs_dev_item *dev_item;
3406 dev_item = btrfs_item_ptr(leaf, slot,
3407 struct btrfs_dev_item);
3408 ret = read_one_dev(root, leaf, dev_item);
3409 if (ret)
3410 goto error;
3411 }
3412 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
3413 struct btrfs_chunk *chunk;
3414 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3415 ret = read_one_chunk(root, &found_key, leaf, chunk);
3416 if (ret)
3417 goto error;
3418 }
3419 path->slots[0]++;
3420 }
3421 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3422 key.objectid = 0;
3423 btrfs_release_path(root, path);
3424 goto again;
3425 }
3426 ret = 0;
3427 error:
3428 btrfs_free_path(path);
3429 return ret;
3430 }