/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#define _XOPEN_SOURCE 500
#define _GNU_SOURCE 1
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <dirent.h>
#include <zlib.h>
#include "kerncompat.h"
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "utils.h"
#include "version.h"
#include "volumes.h"
#include "extent_io.h"

#define HEADER_MAGIC		0xbd5c25e27295668bULL
#define MAX_PENDING_SIZE	(256 * 1024)
#define BLOCK_SIZE		1024
#define BLOCK_MASK		(BLOCK_SIZE - 1)

#define COMPRESS_NONE		0
#define COMPRESS_ZLIB		1

struct meta_cluster_item {
	__le64 bytenr;
	__le32 size;
} __attribute__ ((__packed__));

struct meta_cluster_header {
	__le64 magic;
	__le64 bytenr;
	__le32 nritems;
	u8 compress;
} __attribute__ ((__packed__));

/* cluster header + index items + buffers */
struct meta_cluster {
	struct meta_cluster_header header;
	struct meta_cluster_item items[];
} __attribute__ ((__packed__));

#define ITEMS_PER_CLUSTER ((BLOCK_SIZE - sizeof(struct meta_cluster)) / \
			   sizeof(struct meta_cluster_item))

struct fs_chunk {
	u64 logical;
	u64 physical;
	u64 bytes;
	struct rb_node n;
};

struct async_work {
	struct list_head list;
	struct list_head ordered;
	u64 start;
	u64 size;
	u8 *buffer;
	size_t bufsize;
	int error;
};

struct metadump_struct {
	struct btrfs_root *root;
	FILE *out;

	struct meta_cluster *cluster;

	pthread_t *threads;
	size_t num_threads;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	struct rb_root name_tree;

	struct list_head list;
	struct list_head ordered;
	size_t num_items;
	size_t num_ready;

	u64 pending_start;
	u64 pending_size;

	int compress_level;
	int done;
	int data;
	int sanitize_names;

	int error;
};

struct name {
	struct rb_node n;
	char *val;
	char *sub;
	u32 len;
};

struct mdrestore_struct {
	FILE *in;
	FILE *out;

	pthread_t *threads;
	size_t num_threads;
	pthread_mutex_t mutex;
	pthread_cond_t cond;

	struct rb_root chunk_tree;
	struct list_head list;
	size_t num_items;
	u32 leafsize;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	u8 fsid[BTRFS_FSID_SIZE];

	int compress_method;
	int done;
	int error;
	int old_restore;
	int fixup_offset;
	int multi_devices;
	struct btrfs_fs_info *info;
};

static void print_usage(void) __attribute__((noreturn));
static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
				   u64 search, u64 cluster_bytenr);
static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size);

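/*
 * Compute the crc32c checksum of a block (everything past the csum field)
 * and store the result at the front of the buffer, mirroring how btrfs
 * checksums blocks on disk.
 */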
static void csum_block(u8 *buf, size_t len)
{
	char result[BTRFS_CRC32_SIZE];
	u32 crc = ~(u32)0;
	crc = crc32c(crc, buf + BTRFS_CSUM_SIZE, len - BTRFS_CSUM_SIZE);
	btrfs_csum_final(crc, result);
	memcpy(buf, result, BTRFS_CRC32_SIZE);
}

static int has_name(struct btrfs_key *key)
{
	switch (key->type) {
	case BTRFS_DIR_ITEM_KEY:
	case BTRFS_DIR_INDEX_KEY:
	case BTRFS_INODE_REF_KEY:
	case BTRFS_INODE_EXTREF_KEY:
	case BTRFS_XATTR_ITEM_KEY:
		return 1;
	default:
		break;
	}

	return 0;
}

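/*
 * Fill a buffer of the given length with random printable ASCII, skipping
 * '/' (invalid in file names); used to replace names when sanitizing
 * without collision matching.
 */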
static char *generate_garbage(u32 name_len)
{
	char *buf = malloc(name_len);
	int i;

	if (!buf)
		return NULL;

	for (i = 0; i < name_len; i++) {
		char c = rand() % 94 + 33;

		if (c == '/')
			c++;
		buf[i] = c;
	}

	return buf;
}

static int name_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
{
	struct name *entry = rb_entry(a, struct name, n);
	struct name *ins = rb_entry(b, struct name, n);
	u32 len;

	len = min(ins->len, entry->len);
	return memcmp(ins->val, entry->val, len);
}

static int chunk_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
{
	struct fs_chunk *entry = rb_entry(a, struct fs_chunk, n);
	struct fs_chunk *ins = rb_entry(b, struct fs_chunk, n);

	if (fuzz && ins->logical >= entry->logical &&
	    ins->logical < entry->logical + entry->bytes)
		return 0;

	if (ins->logical < entry->logical)
		return -1;
	else if (ins->logical > entry->logical)
		return 1;
	return 0;
}

static void tree_insert(struct rb_root *root, struct rb_node *ins,
			int (*cmp)(struct rb_node *a, struct rb_node *b,
				   int fuzz))
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	int dir;

	while (*p) {
		parent = *p;

		dir = cmp(*p, ins, 0);
		if (dir < 0)
			p = &(*p)->rb_left;
		else if (dir > 0)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(ins, parent, p);
	rb_insert_color(ins, root);
}

static struct rb_node *tree_search(struct rb_root *root,
				   struct rb_node *search,
				   int (*cmp)(struct rb_node *a,
					      struct rb_node *b, int fuzz),
				   int fuzz)
{
	struct rb_node *n = root->rb_node;
	int dir;

	while (n) {
		dir = cmp(n, search, fuzz);
		if (dir < 0)
			n = n->rb_left;
		else if (dir > 0)
			n = n->rb_right;
		else
			return n;
	}

	return NULL;
}

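/*
 * Replace a name with a same-length string that has the same crc32c, so a
 * sanitized dump still satisfies crc-based dir index lookups.  Candidates
 * are enumerated over the printable characters (skipping '/') until the
 * checksum matches; results are cached in md->name_tree so each name is
 * only solved once.  If no collision is found, plain garbage is used.
 */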
static char *find_collision(struct metadump_struct *md, char *name,
			    u32 name_len)
{
	struct name *val;
	struct rb_node *entry;
	struct name tmp;
	unsigned long checksum;
	int found = 0;
	int i;

	tmp.val = name;
	tmp.len = name_len;
	entry = tree_search(&md->name_tree, &tmp.n, name_cmp, 0);
	if (entry) {
		val = rb_entry(entry, struct name, n);
		free(name);
		return val->sub;
	}

	val = malloc(sizeof(struct name));
	if (!val) {
		fprintf(stderr, "Couldn't sanitize name, enomem\n");
		free(name);
		return NULL;
	}

	memset(val, 0, sizeof(*val));

	val->val = name;
	val->len = name_len;
	val->sub = malloc(name_len);
	if (!val->sub) {
		fprintf(stderr, "Couldn't sanitize name, enomem\n");
		free(val);
		free(name);
		return NULL;
	}

	checksum = crc32c(~1, val->val, name_len);
	memset(val->sub, ' ', name_len);
	i = 0;
	while (1) {
		if (crc32c(~1, val->sub, name_len) == checksum &&
		    memcmp(val->sub, val->val, val->len)) {
			found = 1;
			break;
		}

		if (val->sub[i] == 127) {
			do {
				i++;
				if (i >= name_len)
					break;
			} while (val->sub[i] == 127);

			if (i >= name_len)
				break;
			val->sub[i]++;
			if (val->sub[i] == '/')
				val->sub[i]++;
			memset(val->sub, ' ', i);
			i = 0;
			continue;
		} else {
			val->sub[i]++;
			if (val->sub[i] == '/')
				val->sub[i]++;
		}
	}

	if (!found) {
		fprintf(stderr, "Couldn't find a collision for '%.*s', "
			"generating normal garbage, it won't match indexes\n",
			val->len, val->val);
		for (i = 0; i < name_len; i++) {
			char c = rand() % 94 + 33;

			if (c == '/')
				c++;
			val->sub[i] = c;
		}
	}

	tree_insert(&md->name_tree, &val->n, name_cmp);
	return val->sub;
}

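/*
 * Overwrite every name stored in a DIR_ITEM/DIR_INDEX leaf item.  With
 * sanitize level 1 a throwaway random name is used; above that, a
 * crc32c-colliding name from find_collision() is substituted so the dir
 * index hashes stay consistent.
 */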
static void sanitize_dir_item(struct metadump_struct *md, struct extent_buffer *eb,
			      int slot)
{
	struct btrfs_dir_item *dir_item;
	char *buf;
	char *garbage;
	unsigned long name_ptr;
	u32 total_len;
	u32 cur = 0;
	u32 this_len;
	u32 name_len;
	int free_garbage = (md->sanitize_names == 1);

	dir_item = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	total_len = btrfs_item_size_nr(eb, slot);
	while (cur < total_len) {
		this_len = sizeof(*dir_item) +
			btrfs_dir_name_len(eb, dir_item) +
			btrfs_dir_data_len(eb, dir_item);
		name_ptr = (unsigned long)(dir_item + 1);
		name_len = btrfs_dir_name_len(eb, dir_item);

		if (md->sanitize_names > 1) {
			buf = malloc(name_len);
			if (!buf) {
				fprintf(stderr, "Couldn't sanitize name, "
					"enomem\n");
				return;
			}
			read_extent_buffer(eb, buf, name_ptr, name_len);
			garbage = find_collision(md, buf, name_len);
		} else {
			garbage = generate_garbage(name_len);
		}
		if (!garbage) {
			fprintf(stderr, "Couldn't sanitize name, enomem\n");
			return;
		}
		write_extent_buffer(eb, garbage, name_ptr, name_len);
		cur += this_len;
		dir_item = (struct btrfs_dir_item *)((char *)dir_item +
						     this_len);
		if (free_garbage)
			free(garbage);
	}
}

static void sanitize_inode_ref(struct metadump_struct *md,
			       struct extent_buffer *eb, int slot, int ext)
{
	struct btrfs_inode_extref *extref;
	struct btrfs_inode_ref *ref;
	char *garbage, *buf;
	unsigned long ptr;
	unsigned long name_ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int len;
	int free_garbage = (md->sanitize_names == 1);

	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	while (cur_offset < item_size) {
		if (ext) {
			extref = (struct btrfs_inode_extref *)(ptr +
							       cur_offset);
			name_ptr = (unsigned long)(&extref->name);
			len = btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		} else {
			ref = (struct btrfs_inode_ref *)(ptr + cur_offset);
			len = btrfs_inode_ref_name_len(eb, ref);
			name_ptr = (unsigned long)(ref + 1);
			cur_offset += sizeof(*ref);
		}
		cur_offset += len;

		if (md->sanitize_names > 1) {
			buf = malloc(len);
			if (!buf) {
				fprintf(stderr, "Couldn't sanitize name, "
					"enomem\n");
				return;
			}
			read_extent_buffer(eb, buf, name_ptr, len);
			garbage = find_collision(md, buf, len);
		} else {
			garbage = generate_garbage(len);
		}

		if (!garbage) {
			fprintf(stderr, "Couldn't sanitize name, enomem\n");
			return;
		}
		write_extent_buffer(eb, garbage, name_ptr, len);
		if (free_garbage)
			free(garbage);
	}
}

static void sanitize_xattr(struct metadump_struct *md,
			   struct extent_buffer *eb, int slot)
{
	struct btrfs_dir_item *dir_item;
	unsigned long data_ptr;
	u32 data_len;

	dir_item = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	data_len = btrfs_dir_data_len(eb, dir_item);

	data_ptr = (unsigned long)((char *)(dir_item + 1) +
				   btrfs_dir_name_len(eb, dir_item));
	memset_extent_buffer(eb, 0, data_ptr, data_len);
}

static void sanitize_name(struct metadump_struct *md, u8 *dst,
			  struct extent_buffer *src, struct btrfs_key *key,
			  int slot)
{
	struct extent_buffer *eb;

	eb = alloc_dummy_eb(src->start, src->len);
	if (!eb) {
		fprintf(stderr, "Couldn't sanitize name, no memory\n");
		return;
	}

	memcpy(eb->data, dst, eb->len);

	switch (key->type) {
	case BTRFS_DIR_ITEM_KEY:
	case BTRFS_DIR_INDEX_KEY:
		sanitize_dir_item(md, eb, slot);
		break;
	case BTRFS_INODE_REF_KEY:
		sanitize_inode_ref(md, eb, slot, 0);
		break;
	case BTRFS_INODE_EXTREF_KEY:
		sanitize_inode_ref(md, eb, slot, 1);
		break;
	case BTRFS_XATTR_ITEM_KEY:
		sanitize_xattr(md, eb, slot);
		break;
	default:
		break;
	}

	memcpy(dst, eb->data, eb->len);
	free(eb);
}

/*
 * zero inline extents and csum items
 */
static void zero_items(struct metadump_struct *md, u8 *dst,
		       struct extent_buffer *src)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_item *item;
	struct btrfs_key key;
	u32 nritems = btrfs_header_nritems(src);
	size_t size;
	unsigned long ptr;
	int i, extent_type;

	for (i = 0; i < nritems; i++) {
		item = btrfs_item_nr(i);
		btrfs_item_key_to_cpu(src, &key, i);
		if (key.type == BTRFS_CSUM_ITEM_KEY) {
			size = btrfs_item_size_nr(src, i);
			memset(dst + btrfs_leaf_data(src) +
			       btrfs_item_offset_nr(src, i), 0, size);
			continue;
		}

		if (md->sanitize_names && has_name(&key)) {
			sanitize_name(md, dst, src, &key, i);
			continue;
		}

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(src, fi);
		if (extent_type != BTRFS_FILE_EXTENT_INLINE)
			continue;

		ptr = btrfs_file_extent_inline_start(fi);
		size = btrfs_file_extent_inline_item_len(src, item);
		memset(dst + ptr, 0, size);
	}
}

/*
 * copy buffer and zero useless data in the buffer
 */
static void copy_buffer(struct metadump_struct *md, u8 *dst,
			struct extent_buffer *src)
{
	int level;
	size_t size;
	u32 nritems;

	memcpy(dst, src->data, src->len);
	if (src->start == BTRFS_SUPER_INFO_OFFSET)
		return;

	level = btrfs_header_level(src);
	nritems = btrfs_header_nritems(src);

	if (nritems == 0) {
		size = sizeof(struct btrfs_header);
		memset(dst + size, 0, src->len - size);
	} else if (level == 0) {
		size = btrfs_leaf_data(src) +
			btrfs_item_offset_nr(src, nritems - 1) -
			btrfs_item_nr_offset(nritems);
		memset(dst + btrfs_item_nr_offset(nritems), 0, size);
		zero_items(md, dst, src);
	} else {
		size = offsetof(struct btrfs_node, ptrs) +
			sizeof(struct btrfs_key_ptr) * nritems;
		memset(dst + size, 0, src->len - size);
	}
	csum_block(dst, src->len);
}

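/*
 * Worker thread for the dump side: pull pending extents off md->list and,
 * when a compression level is set, deflate them in place before they are
 * written out by write_buffers().
 */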
static void *dump_worker(void *data)
{
	struct metadump_struct *md = (struct metadump_struct *)data;
	struct async_work *async;
	int ret;

	while (1) {
		pthread_mutex_lock(&md->mutex);
		while (list_empty(&md->list)) {
			if (md->done) {
				pthread_mutex_unlock(&md->mutex);
				goto out;
			}
			pthread_cond_wait(&md->cond, &md->mutex);
		}
		async = list_entry(md->list.next, struct async_work, list);
		list_del_init(&async->list);
		pthread_mutex_unlock(&md->mutex);

		if (md->compress_level > 0) {
			u8 *orig = async->buffer;

			async->bufsize = compressBound(async->size);
			async->buffer = malloc(async->bufsize);
			if (!async->buffer) {
				fprintf(stderr, "Error allocating buffer\n");
				pthread_mutex_lock(&md->mutex);
				if (!md->error)
					md->error = -ENOMEM;
				pthread_mutex_unlock(&md->mutex);
				pthread_exit(NULL);
			}

			ret = compress2(async->buffer,
					(unsigned long *)&async->bufsize,
					orig, async->size, md->compress_level);

			if (ret != Z_OK)
				async->error = 1;

			free(orig);
		}

		pthread_mutex_lock(&md->mutex);
		md->num_ready++;
		pthread_mutex_unlock(&md->mutex);
	}
out:
	pthread_exit(NULL);
}

static void meta_cluster_init(struct metadump_struct *md, u64 start)
{
	struct meta_cluster_header *header;

	md->num_items = 0;
	md->num_ready = 0;
	header = &md->cluster->header;
	header->magic = cpu_to_le64(HEADER_MAGIC);
	header->bytenr = cpu_to_le64(start);
	header->nritems = cpu_to_le32(0);
	header->compress = md->compress_level > 0 ?
			   COMPRESS_ZLIB : COMPRESS_NONE;
}

static void metadump_destroy(struct metadump_struct *md, int num_threads)
{
	int i;
	struct rb_node *n;

	pthread_mutex_lock(&md->mutex);
	md->done = 1;
	pthread_cond_broadcast(&md->cond);
	pthread_mutex_unlock(&md->mutex);

	for (i = 0; i < num_threads; i++)
		pthread_join(md->threads[i], NULL);

	pthread_cond_destroy(&md->cond);
	pthread_mutex_destroy(&md->mutex);

	while ((n = rb_first(&md->name_tree))) {
		struct name *name;

		name = rb_entry(n, struct name, n);
		rb_erase(n, &md->name_tree);
		free(name->val);
		free(name->sub);
		free(name);
	}
	free(md->threads);
	free(md->cluster);
}

static int metadump_init(struct metadump_struct *md, struct btrfs_root *root,
			 FILE *out, int num_threads, int compress_level,
			 int sanitize_names)
{
	int i, ret = 0;

	memset(md, 0, sizeof(*md));
	pthread_cond_init(&md->cond, NULL);
	pthread_mutex_init(&md->mutex, NULL);
	INIT_LIST_HEAD(&md->list);
	INIT_LIST_HEAD(&md->ordered);
	md->root = root;
	md->out = out;
	md->pending_start = (u64)-1;
	md->compress_level = compress_level;
	md->cluster = calloc(1, BLOCK_SIZE);
	md->sanitize_names = sanitize_names;
	if (sanitize_names > 1)
		crc32c_optimization_init();

	if (!md->cluster) {
		pthread_cond_destroy(&md->cond);
		pthread_mutex_destroy(&md->mutex);
		return -ENOMEM;
	}

	meta_cluster_init(md, 0);
	if (!num_threads)
		return 0;

	md->name_tree.rb_node = NULL;
	md->num_threads = num_threads;
	md->threads = calloc(num_threads, sizeof(pthread_t));
	if (!md->threads) {
		free(md->cluster);
		pthread_cond_destroy(&md->cond);
		pthread_mutex_destroy(&md->mutex);
		return -ENOMEM;
	}

	for (i = 0; i < num_threads; i++) {
		ret = pthread_create(md->threads + i, NULL, dump_worker, md);
		if (ret)
			break;
	}

	if (ret)
		metadump_destroy(md, i + 1);

	return ret;
}

static int write_zero(FILE *out, size_t size)
{
	static char zero[BLOCK_SIZE];
	return fwrite(zero, size, 1, out);
}

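/*
 * Flush one cluster to the image: wait for the workers to finish
 * compressing, write the 1KB header/index block, then the item buffers in
 * order, and pad the tail out to the next BLOCK_SIZE boundary.  *next
 * returns the image offset at which the following cluster starts.
 */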
static int write_buffers(struct metadump_struct *md, u64 *next)
{
	struct meta_cluster_header *header = &md->cluster->header;
	struct meta_cluster_item *item;
	struct async_work *async;
	u64 bytenr = 0;
	u32 nritems = 0;
	int ret;
	int err = 0;

	if (list_empty(&md->ordered))
		goto out;

	/* wait until all buffers are compressed */
	while (!err && md->num_items > md->num_ready) {
		struct timespec ts = {
			.tv_sec = 0,
			.tv_nsec = 10000000,
		};
		pthread_mutex_unlock(&md->mutex);
		nanosleep(&ts, NULL);
		pthread_mutex_lock(&md->mutex);
		err = md->error;
	}

	if (err) {
		fprintf(stderr, "One of the threads errored out %s\n",
			strerror(-err));
		goto out;
	}

	/* setup and write index block */
	list_for_each_entry(async, &md->ordered, ordered) {
		item = md->cluster->items + nritems;
		item->bytenr = cpu_to_le64(async->start);
		item->size = cpu_to_le32(async->bufsize);
		nritems++;
	}
	header->nritems = cpu_to_le32(nritems);

	ret = fwrite(md->cluster, BLOCK_SIZE, 1, md->out);
	if (ret != 1) {
		fprintf(stderr, "Error writing out cluster: %d\n", errno);
		return -EIO;
	}

	/* write buffers */
	bytenr += le64_to_cpu(header->bytenr) + BLOCK_SIZE;
	while (!list_empty(&md->ordered)) {
		async = list_entry(md->ordered.next, struct async_work,
				   ordered);
		list_del_init(&async->ordered);

		bytenr += async->bufsize;
		if (!err)
			ret = fwrite(async->buffer, async->bufsize, 1,
				     md->out);
		if (ret != 1) {
			err = -EIO;
			ret = 0;
			fprintf(stderr, "Error writing out cluster: %d\n",
				errno);
		}

		free(async->buffer);
		free(async);
	}

	/* zero unused space in the last block */
	if (!err && bytenr & BLOCK_MASK) {
		size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);

		bytenr += size;
		ret = write_zero(md->out, size);
		if (ret != 1) {
			fprintf(stderr, "Error zeroing out buffer: %d\n",
				errno);
			err = -EIO;
		}
	}
out:
	*next = bytenr;
	return err;
}

static int read_data_extent(struct metadump_struct *md,
			    struct async_work *async)
{
	struct btrfs_multi_bio *multi = NULL;
	struct btrfs_device *device;
	u64 bytes_left = async->size;
	u64 logical = async->start;
	u64 offset = 0;
	u64 bytenr;
	u64 read_len;
	ssize_t done;
	int fd;
	int ret;

	while (bytes_left) {
		read_len = bytes_left;
		ret = btrfs_map_block(&md->root->fs_info->mapping_tree, READ,
				      logical, &read_len, &multi, 0, NULL);
		if (ret) {
			fprintf(stderr, "Couldn't map data block %d\n", ret);
			return ret;
		}

		device = multi->stripes[0].dev;

		if (device->fd == 0) {
			fprintf(stderr,
				"Device we need to read from is not open\n");
			free(multi);
			return -EIO;
		}
		fd = device->fd;
		bytenr = multi->stripes[0].physical;
		free(multi);

		read_len = min(read_len, bytes_left);
		done = pread64(fd, async->buffer + offset, read_len, bytenr);
		if (done < read_len) {
			if (done < 0)
				fprintf(stderr, "Error reading extent %d\n",
					errno);
			else
				fprintf(stderr, "Short read\n");
			return -EIO;
		}

		bytes_left -= done;
		offset += done;
		logical += done;
	}

	return 0;
}

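/*
 * Turn the currently pending run of extents into an async_work item: read
 * the data (for data extents) or copy each tree block through
 * copy_buffer(), queue the item for the workers, and start a new cluster
 * once ITEMS_PER_CLUSTER items have accumulated or the dump is done.
 */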
static int flush_pending(struct metadump_struct *md, int done)
{
	struct async_work *async = NULL;
	struct extent_buffer *eb;
	u64 blocksize = md->root->nodesize;
	u64 start;
	u64 size;
	size_t offset;
	int ret = 0;

	if (md->pending_size) {
		async = calloc(1, sizeof(*async));
		if (!async)
			return -ENOMEM;

		async->start = md->pending_start;
		async->size = md->pending_size;
		async->bufsize = async->size;
		async->buffer = malloc(async->bufsize);
		if (!async->buffer) {
			free(async);
			return -ENOMEM;
		}
		offset = 0;
		start = async->start;
		size = async->size;

		if (md->data) {
			ret = read_data_extent(md, async);
			if (ret) {
				free(async->buffer);
				free(async);
				return ret;
			}
		}

		while (!md->data && size > 0) {
			u64 this_read = min(blocksize, size);
			eb = read_tree_block(md->root, start, this_read, 0);
			if (!eb) {
				free(async->buffer);
				free(async);
				fprintf(stderr,
					"Error reading metadata block\n");
				return -EIO;
			}
			copy_buffer(md, async->buffer + offset, eb);
			free_extent_buffer(eb);
			start += this_read;
			offset += this_read;
			size -= this_read;
		}

		md->pending_start = (u64)-1;
		md->pending_size = 0;
	} else if (!done) {
		return 0;
	}

	pthread_mutex_lock(&md->mutex);
	if (async) {
		list_add_tail(&async->ordered, &md->ordered);
		md->num_items++;
		if (md->compress_level > 0) {
			list_add_tail(&async->list, &md->list);
			pthread_cond_signal(&md->cond);
		} else {
			md->num_ready++;
		}
	}
	if (md->num_items >= ITEMS_PER_CLUSTER || done) {
		ret = write_buffers(md, &start);
		if (ret)
			fprintf(stderr, "Error writing buffers %d\n",
				errno);
		else
			meta_cluster_init(md, start);
	}
	pthread_mutex_unlock(&md->mutex);
	return ret;
}

static int add_extent(u64 start, u64 size, struct metadump_struct *md,
		      int data)
{
	int ret;
	if (md->data != data ||
	    md->pending_size + size > MAX_PENDING_SIZE ||
	    md->pending_start + md->pending_size != start) {
		ret = flush_pending(md, 0);
		if (ret)
			return ret;
		md->pending_start = start;
	}
	readahead_tree_block(md->root, start, size, 0);
	md->pending_size += size;
	md->data = data;
	return 0;
}

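/*
 * On extent tree v0 filesystems the extent item doesn't carry a tree-block
 * flag, so walk the following EXTENT_REF_V0 items and treat the extent as
 * metadata when its ref objectid is below the first free objectid.
 */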
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int is_tree_block(struct btrfs_root *extent_root,
			 struct btrfs_path *path, u64 bytenr)
{
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 ref_objectid;
	int ret;

	leaf = path->nodes[0];
	while (1) {
		struct btrfs_extent_ref_v0 *ref_item;
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				return ret;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr)
			break;
		if (key.type != BTRFS_EXTENT_REF_V0_KEY)
			continue;
		ref_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_extent_ref_v0);
		ref_objectid = btrfs_ref_objectid_v0(leaf, ref_item);
		if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID)
			return 1;
		break;
	}
	return 0;
}
#endif

static int copy_tree_blocks(struct btrfs_root *root, struct extent_buffer *eb,
			    struct metadump_struct *metadump, int root_tree)
{
	struct extent_buffer *tmp;
	struct btrfs_root_item *ri;
	struct btrfs_key key;
	u64 bytenr;
	int level;
	int nritems = 0;
	int i = 0;
	int ret;

	ret = add_extent(btrfs_header_bytenr(eb), root->leafsize, metadump, 0);
	if (ret) {
		fprintf(stderr, "Error adding metadata block\n");
		return ret;
	}

	if (btrfs_header_level(eb) == 0 && !root_tree)
		return 0;

	level = btrfs_header_level(eb);
	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(eb, &key, i);
			if (key.type != BTRFS_ROOT_ITEM_KEY)
				continue;
			ri = btrfs_item_ptr(eb, i, struct btrfs_root_item);
			bytenr = btrfs_disk_root_bytenr(eb, ri);
			tmp = read_tree_block(root, bytenr, root->leafsize, 0);
			if (!tmp) {
				fprintf(stderr,
					"Error reading log root block\n");
				return -EIO;
			}
			ret = copy_tree_blocks(root, tmp, metadump, 0);
			free_extent_buffer(tmp);
			if (ret)
				return ret;
		} else {
			bytenr = btrfs_node_blockptr(eb, i);
			tmp = read_tree_block(root, bytenr, root->leafsize, 0);
			if (!tmp) {
				fprintf(stderr, "Error reading log block\n");
				return -EIO;
			}
			ret = copy_tree_blocks(root, tmp, metadump, root_tree);
			free_extent_buffer(tmp);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int copy_log_trees(struct btrfs_root *root,
			  struct metadump_struct *metadump,
			  struct btrfs_path *path)
{
	u64 blocknr = btrfs_super_log_root(root->fs_info->super_copy);

	if (blocknr == 0)
		return 0;

	if (!root->fs_info->log_root_tree ||
	    !root->fs_info->log_root_tree->node) {
		fprintf(stderr, "Error copying tree log, it wasn't set up\n");
		return -EIO;
	}

	return copy_tree_blocks(root, root->fs_info->log_root_tree->node,
				metadump, 1);
}

static int copy_space_cache(struct btrfs_root *root,
			    struct metadump_struct *metadump,
			    struct btrfs_path *path)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 bytenr, num_bytes;
	int ret;

	root = root->fs_info->tree_root;

	key.objectid = 0;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		fprintf(stderr, "Error searching for free space inode %d\n",
			ret);
		return ret;
	}

	leaf = path->nodes[0];

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				fprintf(stderr, "Error going to next leaf "
					"%d\n", ret);
				return ret;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.type != BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			continue;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) !=
		    BTRFS_FILE_EXTENT_REG) {
			path->slots[0]++;
			continue;
		}

		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		ret = add_extent(bytenr, num_bytes, metadump, 1);
		if (ret) {
			fprintf(stderr, "Error adding space cache blocks %d\n",
				ret);
			btrfs_release_path(path);
			return ret;
		}
		path->slots[0]++;
	}

	return 0;
}

static int copy_from_extent_tree(struct metadump_struct *metadump,
				 struct btrfs_path *path)
{
	struct btrfs_root *extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u64 bytenr;
	u64 num_bytes;
	int ret;

	extent_root = metadump->root->fs_info->extent_root;
	bytenr = BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0) {
		fprintf(stderr, "Error searching extent root %d\n", ret);
		return ret;
	}
	ret = 0;

	leaf = path->nodes[0];

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0) {
				fprintf(stderr, "Error going to next leaf %d"
					"\n", ret);
				break;
			}
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid < bytenr ||
		    (key.type != BTRFS_EXTENT_ITEM_KEY &&
		     key.type != BTRFS_METADATA_ITEM_KEY)) {
			path->slots[0]++;
			continue;
		}

		bytenr = key.objectid;
		if (key.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = extent_root->leafsize;
		else
			num_bytes = key.offset;

		if (btrfs_item_size_nr(leaf, path->slots[0]) > sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			if (btrfs_extent_flags(leaf, ei) &
			    BTRFS_EXTENT_FLAG_TREE_BLOCK) {
				ret = add_extent(bytenr, num_bytes, metadump,
						 0);
				if (ret) {
					fprintf(stderr, "Error adding block "
						"%d\n", ret);
					break;
				}
			}
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			ret = is_tree_block(extent_root, path, bytenr);
			if (ret < 0) {
				fprintf(stderr, "Error checking tree block "
					"%d\n", ret);
				break;
			}

			if (ret) {
				ret = add_extent(bytenr, num_bytes, metadump,
						 0);
				if (ret) {
					fprintf(stderr, "Error adding block "
						"%d\n", ret);
					break;
				}
			}
			ret = 0;
#else
			fprintf(stderr, "Either extent tree corruption or "
				"you haven't built with V0 support\n");
			ret = -EIO;
			break;
#endif
		}
		bytenr += num_bytes;
	}

	btrfs_release_path(path);

	return ret;
}

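/*
 * Entry point for the dump side: open the filesystem, queue the primary
 * superblock, then gather metadata extents either by walking the chunk,
 * root and log trees or by scanning the extent tree, plus the space cache
 * data extents, flushing everything through flush_pending() on the way
 * out.
 */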
static int create_metadump(const char *input, FILE *out, int num_threads,
			   int compress_level, int sanitize, int walk_trees)
{
	struct btrfs_root *root;
	struct btrfs_path *path = NULL;
	struct metadump_struct metadump;
	int ret;
	int err = 0;

	root = open_ctree(input, 0, 0);
	if (!root) {
		fprintf(stderr, "Open ctree failed\n");
		return -EIO;
	}

	BUG_ON(root->nodesize != root->leafsize);

	ret = metadump_init(&metadump, root, out, num_threads,
			    compress_level, sanitize);
	if (ret) {
		fprintf(stderr, "Error initializing metadump %d\n", ret);
		close_ctree(root);
		return ret;
	}

	ret = add_extent(BTRFS_SUPER_INFO_OFFSET, BTRFS_SUPER_INFO_SIZE,
			 &metadump, 0);
	if (ret) {
		fprintf(stderr, "Error adding metadata %d\n", ret);
		err = ret;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		fprintf(stderr, "Out of memory allocating path\n");
		err = -ENOMEM;
		goto out;
	}

	if (walk_trees) {
		ret = copy_tree_blocks(root, root->fs_info->chunk_root->node,
				       &metadump, 1);
		if (ret) {
			err = ret;
			goto out;
		}

		ret = copy_tree_blocks(root, root->fs_info->tree_root->node,
				       &metadump, 1);
		if (ret) {
			err = ret;
			goto out;
		}
	} else {
		ret = copy_from_extent_tree(&metadump, path);
		if (ret) {
			err = ret;
			goto out;
		}
	}

	ret = copy_log_trees(root, &metadump, path);
	if (ret) {
		err = ret;
		goto out;
	}

	ret = copy_space_cache(root, &metadump, path);
out:
	ret = flush_pending(&metadump, 1);
	if (ret) {
		if (!err)
			err = ret;
		fprintf(stderr, "Error flushing pending %d\n", ret);
	}

	metadump_destroy(&metadump, num_threads);

	btrfs_free_path(path);
	ret = close_ctree(root);
	return err ? err : ret;
}

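/*
 * Rewrite the superblock of a restored image the way the old restore did:
 * mark it as a metadump and replace the sys chunk array with a single
 * one-stripe chunk that maps the whole logical space straight onto the
 * single target device.
 */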
static void update_super_old(u8 *buffer)
{
	struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
	struct btrfs_chunk *chunk;
	struct btrfs_disk_key *key;
	u32 sectorsize = btrfs_super_sectorsize(super);
	u64 flags = btrfs_super_flags(super);

	flags |= BTRFS_SUPER_FLAG_METADUMP;
	btrfs_set_super_flags(super, flags);

	key = (struct btrfs_disk_key *)(super->sys_chunk_array);
	chunk = (struct btrfs_chunk *)(super->sys_chunk_array +
				       sizeof(struct btrfs_disk_key));

	btrfs_set_disk_key_objectid(key, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_disk_key_type(key, BTRFS_CHUNK_ITEM_KEY);
	btrfs_set_disk_key_offset(key, 0);

	btrfs_set_stack_chunk_length(chunk, (u64)-1);
	btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
	btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
	btrfs_set_stack_chunk_type(chunk, BTRFS_BLOCK_GROUP_SYSTEM);
	btrfs_set_stack_chunk_io_align(chunk, sectorsize);
	btrfs_set_stack_chunk_io_width(chunk, sectorsize);
	btrfs_set_stack_chunk_sector_size(chunk, sectorsize);
	btrfs_set_stack_chunk_num_stripes(chunk, 1);
	btrfs_set_stack_chunk_sub_stripes(chunk, 0);
	chunk->stripe.devid = super->dev_item.devid;
	btrfs_set_stack_stripe_offset(&chunk->stripe, 0);
	memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
	btrfs_set_super_sys_array_size(super, sizeof(*key) + sizeof(*chunk));
	csum_block(buffer, BTRFS_SUPER_INFO_SIZE);
}

static int update_super(u8 *buffer)
{
	struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
	struct btrfs_chunk *chunk;
	struct btrfs_disk_key *disk_key;
	struct btrfs_key key;
	u32 new_array_size = 0;
	u32 array_size;
	u32 cur = 0;
	u8 *ptr, *write_ptr;
	int old_num_stripes;

	write_ptr = ptr = super->sys_chunk_array;
	array_size = btrfs_super_sys_array_size(super);

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		new_array_size += sizeof(*disk_key);
		memmove(write_ptr, ptr, sizeof(*disk_key));

		write_ptr += sizeof(*disk_key);
		ptr += sizeof(*disk_key);
		cur += sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)ptr;
			old_num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			chunk = (struct btrfs_chunk *)write_ptr;

			memmove(write_ptr, ptr, sizeof(*chunk));
			btrfs_set_stack_chunk_num_stripes(chunk, 1);
			btrfs_set_stack_chunk_sub_stripes(chunk, 0);
			btrfs_set_stack_chunk_type(chunk,
						   BTRFS_BLOCK_GROUP_SYSTEM);
			chunk->stripe.devid = super->dev_item.devid;
			memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid,
			       BTRFS_UUID_SIZE);
			new_array_size += sizeof(*chunk);
		} else {
			fprintf(stderr, "Bogus key in the sys chunk array "
				"%d\n", key.type);
			return -EIO;
		}
		write_ptr += sizeof(*chunk);
		ptr += btrfs_chunk_item_size(old_num_stripes);
		cur += btrfs_chunk_item_size(old_num_stripes);
	}

	btrfs_set_super_sys_array_size(super, new_array_size);
	csum_block(buffer, BTRFS_SUPER_INFO_SIZE);

	return 0;
}

static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size)
{
	struct extent_buffer *eb;

	eb = malloc(sizeof(struct extent_buffer) + size);
	if (!eb)
		return NULL;
	memset(eb, 0, sizeof(struct extent_buffer) + size);

	eb->start = bytenr;
	eb->len = size;
	return eb;
}

static void truncate_item(struct extent_buffer *eb, int slot, u32 new_size)
{
	struct btrfs_item *item;
	u32 nritems;
	u32 old_size;
	u32 old_data_start;
	u32 size_diff;
	u32 data_end;
	int i;

	old_size = btrfs_item_size_nr(eb, slot);
	if (old_size == new_size)
		return;

	nritems = btrfs_header_nritems(eb);
	data_end = btrfs_item_offset_nr(eb, nritems - 1);

	old_data_start = btrfs_item_offset_nr(eb, slot);
	size_diff = old_size - new_size;

	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(i);
		ioff = btrfs_item_offset(eb, item);
		btrfs_set_item_offset(eb, item, ioff + size_diff);
	}

	memmove_extent_buffer(eb, btrfs_leaf_data(eb) + data_end + size_diff,
			      btrfs_leaf_data(eb) + data_end,
			      old_data_start + new_size - data_end);
	item = btrfs_item_nr(slot);
	btrfs_set_item_size(eb, item, new_size);
}

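/*
 * Rewrite the chunk items in a restored chunk tree leaf so they describe a
 * single stripe on the target device: truncate each chunk item to one
 * stripe, clear the RAID profile bits from the chunk type, and re-checksum
 * the block.
 */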
static int fixup_chunk_tree_block(struct mdrestore_struct *mdres,
				  struct async_work *async, u8 *buffer,
				  size_t size)
{
	struct extent_buffer *eb;
	size_t size_left = size;
	u64 bytenr = async->start;
	int i;

	if (size_left % mdres->leafsize)
		return 0;

	eb = alloc_dummy_eb(bytenr, mdres->leafsize);
	if (!eb)
		return -ENOMEM;

	while (size_left) {
		eb->start = bytenr;
		memcpy(eb->data, buffer, mdres->leafsize);

		if (btrfs_header_bytenr(eb) != bytenr)
			break;
		if (memcmp(mdres->fsid,
			   eb->data + offsetof(struct btrfs_header, fsid),
			   BTRFS_FSID_SIZE))
			break;

		if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID)
			goto next;

		if (btrfs_header_level(eb) != 0)
			goto next;

		for (i = 0; i < btrfs_header_nritems(eb); i++) {
			struct btrfs_chunk chunk;
			struct btrfs_key key;
			u64 type;

			btrfs_item_key_to_cpu(eb, &key, i);
			if (key.type != BTRFS_CHUNK_ITEM_KEY)
				continue;
			truncate_item(eb, i, sizeof(chunk));
			read_extent_buffer(eb, &chunk,
					   btrfs_item_ptr_offset(eb, i),
					   sizeof(chunk));

			/* Zero out the RAID profile */
			type = btrfs_stack_chunk_type(&chunk);
			type &= (BTRFS_BLOCK_GROUP_DATA |
				 BTRFS_BLOCK_GROUP_SYSTEM |
				 BTRFS_BLOCK_GROUP_METADATA |
				 BTRFS_BLOCK_GROUP_DUP);
			btrfs_set_stack_chunk_type(&chunk, type);

			btrfs_set_stack_chunk_num_stripes(&chunk, 1);
			btrfs_set_stack_chunk_sub_stripes(&chunk, 0);
			btrfs_set_stack_stripe_devid(&chunk.stripe, mdres->devid);
			memcpy(chunk.stripe.dev_uuid, mdres->uuid,
			       BTRFS_UUID_SIZE);
			write_extent_buffer(eb, &chunk,
					    btrfs_item_ptr_offset(eb, i),
					    sizeof(chunk));
		}
		memcpy(buffer, eb->data, eb->len);
		csum_block(buffer, eb->len);
next:
		size_left -= mdres->leafsize;
		buffer += mdres->leafsize;
		bytenr += mdres->leafsize;
	}

	free(eb);
	return 0;
}

static void write_backup_supers(int fd, u8 *buf)
{
	struct btrfs_super_block *super = (struct btrfs_super_block *)buf;
	struct stat st;
	u64 size;
	u64 bytenr;
	int i;
	int ret;

	if (fstat(fd, &st)) {
		fprintf(stderr, "Couldn't stat restore point, won't be able "
			"to write backup supers: %d\n", errno);
		return;
	}

	size = btrfs_device_size(fd, &st);

	for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE > size)
			break;
		btrfs_set_super_bytenr(super, bytenr);
		csum_block(buf, BTRFS_SUPER_INFO_SIZE);
		ret = pwrite64(fd, buf, BTRFS_SUPER_INFO_SIZE, bytenr);
		if (ret < BTRFS_SUPER_INFO_SIZE) {
			if (ret < 0)
				fprintf(stderr, "Problem writing out backup "
					"super block %d, err %d\n", i, errno);
			else
				fprintf(stderr, "Short write writing out "
					"backup super block\n");
			break;
		}
	}
}

static u64 logical_to_physical(struct mdrestore_struct *mdres, u64 logical, u64 *size)
{
	struct fs_chunk *fs_chunk;
	struct rb_node *entry;
	struct fs_chunk search;
	u64 offset;

	if (logical == BTRFS_SUPER_INFO_OFFSET)
		return logical;

	search.logical = logical;
	entry = tree_search(&mdres->chunk_tree, &search.n, chunk_cmp, 1);
	if (!entry) {
		if (mdres->in != stdin)
			printf("Couldn't find a chunk, using logical\n");
		return logical;
	}
	fs_chunk = rb_entry(entry, struct fs_chunk, n);
	if (fs_chunk->logical > logical || fs_chunk->logical + fs_chunk->bytes < logical)
		BUG();
	offset = search.logical - fs_chunk->logical;

	*size = min(*size, fs_chunk->bytes + fs_chunk->logical - logical);
	return fs_chunk->physical + offset;
}

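/*
 * Worker thread for the restore side: pull cluster items off mdres->list,
 * inflate them if the image is compressed, fix up the superblock and chunk
 * tree blocks for the single target device, then write each piece at its
 * physical offset (via logical_to_physical()) or hand it to
 * write_data_to_disk() in the fixup-offset multi-device case.
 */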
static void *restore_worker(void *data)
{
	struct mdrestore_struct *mdres = (struct mdrestore_struct *)data;
	struct async_work *async;
	size_t size;
	u8 *buffer;
	u8 *outbuf;
	int outfd;
	int ret;
	int compress_size = MAX_PENDING_SIZE * 4;

	outfd = fileno(mdres->out);
	buffer = malloc(compress_size);
	if (!buffer) {
		fprintf(stderr, "Error allocating buffer\n");
		pthread_mutex_lock(&mdres->mutex);
		if (!mdres->error)
			mdres->error = -ENOMEM;
		pthread_mutex_unlock(&mdres->mutex);
		pthread_exit(NULL);
	}

	while (1) {
		u64 bytenr;
		off_t offset = 0;
		int err = 0;

		pthread_mutex_lock(&mdres->mutex);
		while (!mdres->leafsize || list_empty(&mdres->list)) {
			if (mdres->done) {
				pthread_mutex_unlock(&mdres->mutex);
				goto out;
			}
			pthread_cond_wait(&mdres->cond, &mdres->mutex);
		}
		async = list_entry(mdres->list.next, struct async_work, list);
		list_del_init(&async->list);
		pthread_mutex_unlock(&mdres->mutex);

		if (mdres->compress_method == COMPRESS_ZLIB) {
			size = compress_size;
			ret = uncompress(buffer, (unsigned long *)&size,
					 async->buffer, async->bufsize);
			if (ret != Z_OK) {
				fprintf(stderr, "Error decompressing %d\n",
					ret);
				err = -EIO;
			}
			outbuf = buffer;
		} else {
			outbuf = async->buffer;
			size = async->bufsize;
		}

		if (!mdres->multi_devices) {
			if (async->start == BTRFS_SUPER_INFO_OFFSET) {
				if (mdres->old_restore) {
					update_super_old(outbuf);
				} else {
					ret = update_super(outbuf);
					if (ret)
						err = ret;
				}
			} else if (!mdres->old_restore) {
				ret = fixup_chunk_tree_block(mdres, async, outbuf, size);
				if (ret)
					err = ret;
			}
		}

		if (!mdres->fixup_offset) {
			while (size) {
				u64 chunk_size = size;
				if (!mdres->multi_devices && !mdres->old_restore)
					bytenr = logical_to_physical(mdres,
							async->start + offset,
							&chunk_size);
				else
					bytenr = async->start + offset;

				ret = pwrite64(outfd, outbuf + offset, chunk_size,
					       bytenr);
				if (ret != chunk_size) {
					if (ret < 0) {
						fprintf(stderr, "Error writing to "
							"device %d\n", errno);
						err = errno;
						break;
					} else {
						fprintf(stderr, "Short write\n");
						err = -EIO;
						break;
					}
				}
				size -= chunk_size;
				offset += chunk_size;
			}
		} else if (async->start != BTRFS_SUPER_INFO_OFFSET) {
			ret = write_data_to_disk(mdres->info, outbuf, async->start, size, 0);
			if (ret) {
				printk("Error writing data\n");
				exit(1);
			}
		}

		/* backup super blocks are already there at fixup_offset stage */
		if (!mdres->multi_devices && async->start == BTRFS_SUPER_INFO_OFFSET)
			write_backup_supers(outfd, outbuf);

		pthread_mutex_lock(&mdres->mutex);
		if (err && !mdres->error)
			mdres->error = err;
		mdres->num_items--;
		pthread_mutex_unlock(&mdres->mutex);

		free(async->buffer);
		free(async);
	}
out:
	free(buffer);
	pthread_exit(NULL);
}

static void mdrestore_destroy(struct mdrestore_struct *mdres, int num_threads)
{
	struct rb_node *n;
	int i;

	while ((n = rb_first(&mdres->chunk_tree))) {
		struct fs_chunk *entry;

		entry = rb_entry(n, struct fs_chunk, n);
		rb_erase(n, &mdres->chunk_tree);
		free(entry);
	}
	pthread_mutex_lock(&mdres->mutex);
	mdres->done = 1;
	pthread_cond_broadcast(&mdres->cond);
	pthread_mutex_unlock(&mdres->mutex);

	for (i = 0; i < num_threads; i++)
		pthread_join(mdres->threads[i], NULL);

	pthread_cond_destroy(&mdres->cond);
	pthread_mutex_destroy(&mdres->mutex);
	free(mdres->threads);
}

static int mdrestore_init(struct mdrestore_struct *mdres,
			  FILE *in, FILE *out, int old_restore,
			  int num_threads, int fixup_offset,
			  struct btrfs_fs_info *info, int multi_devices)
{
	int i, ret = 0;

	memset(mdres, 0, sizeof(*mdres));
	pthread_cond_init(&mdres->cond, NULL);
	pthread_mutex_init(&mdres->mutex, NULL);
	INIT_LIST_HEAD(&mdres->list);
	mdres->in = in;
	mdres->out = out;
	mdres->old_restore = old_restore;
	mdres->chunk_tree.rb_node = NULL;
	mdres->fixup_offset = fixup_offset;
	mdres->info = info;
	mdres->multi_devices = multi_devices;

	if (!num_threads)
		return 0;

	mdres->num_threads = num_threads;
	mdres->threads = calloc(num_threads, sizeof(pthread_t));
	if (!mdres->threads)
		return -ENOMEM;
	for (i = 0; i < num_threads; i++) {
		ret = pthread_create(mdres->threads + i, NULL, restore_worker,
				     mdres);
		if (ret)
			break;
	}
	if (ret)
		mdrestore_destroy(mdres, i + 1);
	return ret;
}

static int fill_mdres_info(struct mdrestore_struct *mdres,
			   struct async_work *async)
{
	struct btrfs_super_block *super;
	u8 *buffer = NULL;
	u8 *outbuf;
	int ret;

	/* We've already been initialized */
	if (mdres->leafsize)
		return 0;

	if (mdres->compress_method == COMPRESS_ZLIB) {
		size_t size = MAX_PENDING_SIZE * 2;

		buffer = malloc(MAX_PENDING_SIZE * 2);
		if (!buffer)
			return -ENOMEM;
		ret = uncompress(buffer, (unsigned long *)&size,
				 async->buffer, async->bufsize);
		if (ret != Z_OK) {
			fprintf(stderr, "Error decompressing %d\n", ret);
			free(buffer);
			return -EIO;
		}
		outbuf = buffer;
	} else {
		outbuf = async->buffer;
	}

	super = (struct btrfs_super_block *)outbuf;
	mdres->leafsize = btrfs_super_leafsize(super);
	memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
	memcpy(mdres->uuid, super->dev_item.uuid,
	       BTRFS_UUID_SIZE);
	mdres->devid = le64_to_cpu(super->dev_item.devid);
	free(buffer);
	return 0;
}

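/*
 * Read one cluster's worth of items from the image: parse the index in the
 * header block, read each item's buffer, pick up the filesystem info from
 * the superblock item when it passes by, queue everything for the restore
 * workers, and skip the padding up to the next BLOCK_SIZE boundary.
 */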
static int add_cluster(struct meta_cluster *cluster,
		       struct mdrestore_struct *mdres, u64 *next)
{
	struct meta_cluster_item *item;
	struct meta_cluster_header *header = &cluster->header;
	struct async_work *async;
	u64 bytenr;
	u32 i, nritems;
	int ret;

	BUG_ON(mdres->num_items);
	mdres->compress_method = header->compress;

	bytenr = le64_to_cpu(header->bytenr) + BLOCK_SIZE;
	nritems = le32_to_cpu(header->nritems);
	for (i = 0; i < nritems; i++) {
		item = &cluster->items[i];
		async = calloc(1, sizeof(*async));
		if (!async) {
			fprintf(stderr, "Error allocating async\n");
			return -ENOMEM;
		}
		async->start = le64_to_cpu(item->bytenr);
		async->bufsize = le32_to_cpu(item->size);
		async->buffer = malloc(async->bufsize);
		if (!async->buffer) {
			fprintf(stderr, "Error allocating async buffer\n");
			free(async);
			return -ENOMEM;
		}
		ret = fread(async->buffer, async->bufsize, 1, mdres->in);
		if (ret != 1) {
			fprintf(stderr, "Error reading buffer %d\n", errno);
			free(async->buffer);
			free(async);
			return -EIO;
		}
		bytenr += async->bufsize;

		pthread_mutex_lock(&mdres->mutex);
		if (async->start == BTRFS_SUPER_INFO_OFFSET) {
			ret = fill_mdres_info(mdres, async);
			if (ret) {
				fprintf(stderr, "Error setting up restore\n");
				pthread_mutex_unlock(&mdres->mutex);
				free(async->buffer);
				free(async);
				return ret;
			}
		}
		list_add_tail(&async->list, &mdres->list);
		mdres->num_items++;
		pthread_cond_signal(&mdres->cond);
		pthread_mutex_unlock(&mdres->mutex);
	}
	if (bytenr & BLOCK_MASK) {
		char buffer[BLOCK_MASK];
		size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);

		bytenr += size;
		ret = fread(buffer, size, 1, mdres->in);
		if (ret != 1) {
			fprintf(stderr, "Error reading in buffer %d\n", errno);
			return -EIO;
		}
	}
	*next = bytenr;
	return 0;
}

static int wait_for_worker(struct mdrestore_struct *mdres)
{
	int ret = 0;

	pthread_mutex_lock(&mdres->mutex);
	ret = mdres->error;
	while (!ret && mdres->num_items > 0) {
		struct timespec ts = {
			.tv_sec = 0,
			.tv_nsec = 10000000,
		};
		pthread_mutex_unlock(&mdres->mutex);
		nanosleep(&ts, NULL);
		pthread_mutex_lock(&mdres->mutex);
		ret = mdres->error;
	}
	pthread_mutex_unlock(&mdres->mutex);
	return ret;
}

static int read_chunk_block(struct mdrestore_struct *mdres, u8 *buffer,
			    u64 bytenr, u64 item_bytenr, u32 bufsize,
			    u64 cluster_bytenr)
{
	struct extent_buffer *eb;
	int ret = 0;
	int i;

	eb = alloc_dummy_eb(bytenr, mdres->leafsize);
	if (!eb) {
		ret = -ENOMEM;
		goto out;
	}

	while (item_bytenr != bytenr) {
		buffer += mdres->leafsize;
		item_bytenr += mdres->leafsize;
	}

	memcpy(eb->data, buffer, mdres->leafsize);
	if (btrfs_header_bytenr(eb) != bytenr) {
		fprintf(stderr, "Eb bytenr doesn't match found bytenr\n");
		ret = -EIO;
		goto out;
	}

	if (memcmp(mdres->fsid, eb->data + offsetof(struct btrfs_header, fsid),
		   BTRFS_FSID_SIZE)) {
		fprintf(stderr, "Fsid doesn't match\n");
		ret = -EIO;
		goto out;
	}

	if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID) {
		fprintf(stderr, "Does not belong to the chunk tree\n");
		ret = -EIO;
		goto out;
	}

	for (i = 0; i < btrfs_header_nritems(eb); i++) {
		struct btrfs_chunk chunk;
		struct fs_chunk *fs_chunk;
		struct btrfs_key key;

		if (btrfs_header_level(eb)) {
			u64 blockptr = btrfs_node_blockptr(eb, i);

			ret = search_for_chunk_blocks(mdres, blockptr,
						      cluster_bytenr);
			if (ret)
				break;
			continue;
		}

		/* Yay a leaf!  We loves leafs! */
		btrfs_item_key_to_cpu(eb, &key, i);
		if (key.type != BTRFS_CHUNK_ITEM_KEY)
			continue;

		fs_chunk = malloc(sizeof(struct fs_chunk));
		if (!fs_chunk) {
			fprintf(stderr, "Error allocating chunk\n");
			ret = -ENOMEM;
			break;
		}
		memset(fs_chunk, 0, sizeof(*fs_chunk));
		read_extent_buffer(eb, &chunk, btrfs_item_ptr_offset(eb, i),
				   sizeof(chunk));

		fs_chunk->logical = key.offset;
		fs_chunk->physical = btrfs_stack_stripe_offset(&chunk.stripe);
		fs_chunk->bytes = btrfs_stack_chunk_length(&chunk);
		tree_insert(&mdres->chunk_tree, &fs_chunk->n, chunk_cmp);
	}
out:
	free(eb);
	return ret;
}

/* If you have to ask you aren't worthy */
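/*
 * Scan the image for the item that contains the chunk tree block at
 * 'search': walk clusters starting from cluster_bytenr (wrapping back to
 * the start of the image once), decompress items as needed, and hand the
 * containing buffer to read_chunk_block(), which recurses for interior
 * nodes.
 */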
static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
				   u64 search, u64 cluster_bytenr)
{
	struct meta_cluster *cluster;
	struct meta_cluster_header *header;
	struct meta_cluster_item *item;
	u64 current_cluster = cluster_bytenr, bytenr;
	u64 item_bytenr;
	u32 bufsize, nritems, i;
	u32 max_size = MAX_PENDING_SIZE * 2;
	u8 *buffer, *tmp = NULL;
	int ret = 0;

	cluster = malloc(BLOCK_SIZE);
	if (!cluster) {
		fprintf(stderr, "Error allocating cluster\n");
		return -ENOMEM;
	}

	buffer = malloc(max_size);
	if (!buffer) {
		fprintf(stderr, "Error allocating buffer\n");
		free(cluster);
		return -ENOMEM;
	}

	if (mdres->compress_method == COMPRESS_ZLIB) {
		tmp = malloc(max_size);
		if (!tmp) {
			fprintf(stderr, "Error allocating tmp buffer\n");
			free(cluster);
			free(buffer);
			return -ENOMEM;
		}
	}

	bytenr = current_cluster;
	while (1) {
		if (fseek(mdres->in, current_cluster, SEEK_SET)) {
			fprintf(stderr, "Error seeking: %d\n", errno);
			ret = -EIO;
			break;
		}

		ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
		if (ret == 0) {
			if (cluster_bytenr != 0) {
				cluster_bytenr = 0;
				current_cluster = 0;
				bytenr = 0;
				continue;
			}
			printf("ok this is where we screwed up?\n");
			ret = -EIO;
			break;
		} else if (ret < 0) {
			fprintf(stderr, "Error reading image\n");
			break;
		}
		ret = 0;

		header = &cluster->header;
		if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
		    le64_to_cpu(header->bytenr) != current_cluster) {
			fprintf(stderr, "bad header in metadump image\n");
			ret = -EIO;
			break;
		}

		bytenr += BLOCK_SIZE;
		nritems = le32_to_cpu(header->nritems);
		for (i = 0; i < nritems; i++) {
			size_t size;

			item = &cluster->items[i];
			bufsize = le32_to_cpu(item->size);
			item_bytenr = le64_to_cpu(item->bytenr);

			if (bufsize > max_size) {
				fprintf(stderr, "item %u size %u too big\n",
					i, bufsize);
				ret = -EIO;
				break;
			}

			if (mdres->compress_method == COMPRESS_ZLIB) {
				ret = fread(tmp, bufsize, 1, mdres->in);
				if (ret != 1) {
					fprintf(stderr, "Error reading: %d\n",
						errno);
					ret = -EIO;
					break;
				}

				size = max_size;
				ret = uncompress(buffer,
						 (unsigned long *)&size, tmp,
						 bufsize);
				if (ret != Z_OK) {
					fprintf(stderr, "Error decompressing "
						"%d\n", ret);
					ret = -EIO;
					break;
				}
			} else {
				ret = fread(buffer, bufsize, 1, mdres->in);
				if (ret != 1) {
					fprintf(stderr, "Error reading: %d\n",
						errno);
					ret = -EIO;
					break;
				}
				size = bufsize;
			}
			ret = 0;

			if (item_bytenr <= search &&
			    item_bytenr + size > search) {
				ret = read_chunk_block(mdres, buffer, search,
						       item_bytenr, size,
						       current_cluster);
				if (!ret)
					ret = 1;
				break;
			}
			bytenr += bufsize;
		}
		if (ret) {
			if (ret > 0)
				ret = 0;
			break;
		}
		if (bytenr & BLOCK_MASK)
			bytenr += BLOCK_SIZE - (bytenr & BLOCK_MASK);
		current_cluster = bytenr;
	}

	free(tmp);
	free(buffer);
	free(cluster);
	return ret;
}

static int build_chunk_tree(struct mdrestore_struct *mdres,
			    struct meta_cluster *cluster)
{
	struct btrfs_super_block *super;
	struct meta_cluster_header *header;
	struct meta_cluster_item *item = NULL;
	u64 chunk_root_bytenr = 0;
	u32 i, nritems;
	u64 bytenr = 0;
	u8 *buffer;
	int ret;

	/* We can't seek with stdin so don't bother doing this */
	if (mdres->in == stdin)
		return 0;

	ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
	if (ret <= 0) {
		fprintf(stderr, "Error reading in cluster: %d\n", errno);
		return -EIO;
	}
	ret = 0;

	header = &cluster->header;
	if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
	    le64_to_cpu(header->bytenr) != 0) {
		fprintf(stderr, "bad header in metadump image\n");
		return -EIO;
	}

	bytenr += BLOCK_SIZE;
	mdres->compress_method = header->compress;
	nritems = le32_to_cpu(header->nritems);
	for (i = 0; i < nritems; i++) {
		item = &cluster->items[i];

		if (le64_to_cpu(item->bytenr) == BTRFS_SUPER_INFO_OFFSET)
			break;
		bytenr += le32_to_cpu(item->size);
		if (fseek(mdres->in, le32_to_cpu(item->size), SEEK_CUR)) {
			fprintf(stderr, "Error seeking: %d\n", errno);
			return -EIO;
		}
	}

	if (!item || le64_to_cpu(item->bytenr) != BTRFS_SUPER_INFO_OFFSET) {
		fprintf(stderr, "Huh, didn't find the super?\n");
		return -EINVAL;
	}

	buffer = malloc(le32_to_cpu(item->size));
	if (!buffer) {
		fprintf(stderr, "Error allocating buffer\n");
		return -ENOMEM;
	}

	ret = fread(buffer, le32_to_cpu(item->size), 1, mdres->in);
	if (ret != 1) {
		fprintf(stderr, "Error reading buffer: %d\n", errno);
		free(buffer);
		return -EIO;
	}

	if (mdres->compress_method == COMPRESS_ZLIB) {
		size_t size = MAX_PENDING_SIZE * 2;
		u8 *tmp;

		tmp = malloc(MAX_PENDING_SIZE * 2);
		if (!tmp) {
			free(buffer);
			return -ENOMEM;
		}
		ret = uncompress(tmp, (unsigned long *)&size,
				 buffer, le32_to_cpu(item->size));
		if (ret != Z_OK) {
			fprintf(stderr, "Error decompressing %d\n", ret);
			free(buffer);
			free(tmp);
			return -EIO;
		}
		free(buffer);
		buffer = tmp;
	}

	pthread_mutex_lock(&mdres->mutex);
	super = (struct btrfs_super_block *)buffer;
	chunk_root_bytenr = btrfs_super_chunk_root(super);
	mdres->leafsize = btrfs_super_leafsize(super);
	memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
	memcpy(mdres->uuid, super->dev_item.uuid,
	       BTRFS_UUID_SIZE);
	mdres->devid = le64_to_cpu(super->dev_item.devid);
	free(buffer);
	pthread_mutex_unlock(&mdres->mutex);

	return search_for_chunk_blocks(mdres, chunk_root_bytenr, 0);
}

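/*
 * Core of the restore side: locate the superblock in the image, rebuild
 * the logical->physical chunk mapping via build_chunk_tree(), then replay
 * every cluster in the image through add_cluster()/wait_for_worker().
 * With fixup_offset the target filesystem is opened read-write so blocks
 * can be placed through its own chunk mapping instead.
 */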
2253 static int __restore_metadump(const char *input, FILE *out, int old_restore,
2254 int num_threads, int fixup_offset,
2255 const char *target, int multi_devices)
2257 struct meta_cluster *cluster = NULL;
2258 struct meta_cluster_header *header;
2259 struct mdrestore_struct mdrestore;
2260 struct btrfs_fs_info *info = NULL;
2261 u64 bytenr = 0;
2262 FILE *in = NULL;
2263 int ret = 0;
2265 if (!strcmp(input, "-")) {
2266 in = stdin;
2267 } else {
2268 in = fopen(input, "r");
2269 if (!in) {
2270 perror("unable to open metadump image");
2271 return 1;
2275 /* NOTE: open with write mode */
2276 if (fixup_offset) {
2277 BUG_ON(!target);
2278 info = open_ctree_fs_info(target, 0, 0,
2279 OPEN_CTREE_WRITES |
2280 OPEN_CTREE_RESTORE |
2281 OPEN_CTREE_PARTIAL);
2282 if (!info) {
2283 fprintf(stderr, "%s: open ctree failed\n", __func__);
2284 ret = -EIO;
2285 goto failed_open;
2289 cluster = malloc(BLOCK_SIZE);
2290 if (!cluster) {
2291 fprintf(stderr, "Error allocating cluster\n");
2292 ret = -ENOMEM;
2293 goto failed_info;
2296 ret = mdrestore_init(&mdrestore, in, out, old_restore, num_threads,
2297 fixup_offset, info, multi_devices);
2298 if (ret) {
2299 fprintf(stderr, "Error initing mdrestore %d\n", ret);
2300 goto failed_cluster;
2303 if (!multi_devices && !old_restore) {
2304 ret = build_chunk_tree(&mdrestore, cluster);
2305 if (ret)
2306 goto out;
2309 if (in != stdin && fseek(in, 0, SEEK_SET)) {
2310 fprintf(stderr, "Error seeking %d\n", errno);
2311 goto out;
	while (1) {
		ret = fread(cluster, BLOCK_SIZE, 1, in);
		if (!ret)
			break;

		header = &cluster->header;
		if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
		    le64_to_cpu(header->bytenr) != bytenr) {
			fprintf(stderr, "bad header in metadump image\n");
			ret = -EIO;
			break;
		}
		ret = add_cluster(cluster, &mdrestore, &bytenr);
		if (ret) {
			fprintf(stderr, "Error adding cluster\n");
			break;
		}

		ret = wait_for_worker(&mdrestore);
		if (ret) {
			fprintf(stderr, "One of the worker threads failed: %d\n",
				ret);
			break;
		}
	}
out:
	mdrestore_destroy(&mdrestore, num_threads);
failed_cluster:
	free(cluster);
failed_info:
	if (fixup_offset && info)
		close_ctree(info->chunk_root);
failed_open:
	if (in != stdin)
		fclose(in);
	return ret;
}
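/* Normal restore entry point: no chunk offset fixup, no secondary target. */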
static int restore_metadump(const char *input, FILE *out, int old_restore,
			    int num_threads, int multi_devices)
{
	return __restore_metadump(input, out, old_restore, num_threads, 0,
				  NULL, multi_devices);
}
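/*
 * Second pass of a multi-device restore: replay the image against the
 * filesystem already restored on 'target' so metadata blocks get remapped
 * onto the chunk layout that actually exists there.
 */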
static int fixup_metadump(const char *input, FILE *out, int num_threads,
			  const char *target)
{
	return __restore_metadump(input, out, 0, num_threads, 1, target, 1);
}
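/*
 * A multi-device restore writes all metadata to the first device; the
 * remaining devices only need a valid super block. Look up the dev item
 * for 'cur_devid' in the chunk tree and stamp a matching super block
 * (including the backup copies) onto 'other_dev'.
 */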
static int update_disk_super_on_device(struct btrfs_fs_info *info,
				       const char *other_dev, u64 cur_devid)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_path path;
	struct btrfs_dev_item *dev_item;
	struct btrfs_super_block *disk_super;
	char dev_uuid[BTRFS_UUID_SIZE];
	char fs_uuid[BTRFS_UUID_SIZE];
	u64 devid, type, io_align, io_width;
	u64 sector_size, total_bytes, bytes_used;
	char *buf;
	int fp;
	int ret;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = cur_devid;

	btrfs_init_path(&path);
	ret = btrfs_search_slot(NULL, info->chunk_root, &key, &path, 0, 0);
	if (ret) {
		fprintf(stderr, "search key failed\n");
		exit(1);
	}

	leaf = path.nodes[0];
	dev_item = btrfs_item_ptr(leaf, path.slots[0],
				  struct btrfs_dev_item);

	devid = btrfs_device_id(leaf, dev_item);
	if (devid != cur_devid) {
		printk("devid mismatch: found %llu, expected %llu\n",
		       devid, cur_devid);
		exit(1);
	}

	type = btrfs_device_type(leaf, dev_item);
	io_align = btrfs_device_io_align(leaf, dev_item);
	io_width = btrfs_device_io_width(leaf, dev_item);
	sector_size = btrfs_device_sector_size(leaf, dev_item);
	total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	btrfs_release_path(&path);
	printk("update disk super on %s devid=%llu\n", other_dev, devid);

	/* update the other device's super block */
	fp = open(other_dev, O_CREAT | O_RDWR, 0600);
	if (fp < 0) {
		fprintf(stderr, "could not open %s\n", other_dev);
		exit(1);
	}

	buf = malloc(BTRFS_SUPER_INFO_SIZE);
	if (!buf) {
		ret = -ENOMEM;
		close(fp);
		return ret;
	}
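	/*
	 * Start from the super block of the restored filesystem, patch in
	 * the fields that are specific to this device and recompute the
	 * checksum before writing it out.
	 */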
	memcpy(buf, info->super_copy, BTRFS_SUPER_INFO_SIZE);

	disk_super = (struct btrfs_super_block *)buf;
	dev_item = &disk_super->dev_item;

	btrfs_set_stack_device_type(dev_item, type);
	btrfs_set_stack_device_id(dev_item, devid);
	btrfs_set_stack_device_total_bytes(dev_item, total_bytes);
	btrfs_set_stack_device_bytes_used(dev_item, bytes_used);
	btrfs_set_stack_device_io_align(dev_item, io_align);
	btrfs_set_stack_device_io_width(dev_item, io_width);
	btrfs_set_stack_device_sector_size(dev_item, sector_size);
	memcpy(dev_item->uuid, dev_uuid, BTRFS_UUID_SIZE);
	memcpy(dev_item->fsid, fs_uuid, BTRFS_UUID_SIZE);
	csum_block((u8 *)buf, BTRFS_SUPER_INFO_SIZE);

	ret = pwrite64(fp, buf, BTRFS_SUPER_INFO_SIZE, BTRFS_SUPER_INFO_OFFSET);
	if (ret != BTRFS_SUPER_INFO_SIZE) {
		ret = -EIO;
		goto out;
	}
	/* pwrite64() returned the byte count; report success as 0 */
	ret = 0;

	write_backup_supers(fp, (u8 *)buf);

out:
	free(buf);
	close(fp);
	return ret;
}
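/*
 * Typical invocations (illustrative):
 *
 *   btrfs-image /dev/sda image.dump          # dump metadata to a file
 *   btrfs-image -r image.dump /dev/sdb       # restore onto a device
 *   btrfs-image -r -m img /dev/sdb /dev/sdc  # multi-device restore
 */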
static void print_usage(void)
{
	fprintf(stderr, "usage: btrfs-image [options] source target\n");
	fprintf(stderr, "\t-r \trestore metadump image\n");
	fprintf(stderr, "\t-c value\tcompression level (0 ~ 9)\n");
	fprintf(stderr, "\t-t value\tnumber of threads (1 ~ 32)\n");
	fprintf(stderr, "\t-o \tdon't mess with the chunk tree when restoring\n");
	fprintf(stderr, "\t-s \tsanitize file names, use once to replace them with garbage, use twice if you want crc collisions\n");
	fprintf(stderr, "\t-w \twalk all trees instead of using the extent tree, do this if your extent tree is broken\n");
	fprintf(stderr, "\t-m \trestore for multiple devices\n");
	exit(1);
}
int main(int argc, char *argv[])
{
	char *source;
	char *target;
	u64 num_threads = 0;
	u64 compress_level = 0;
	int create = 1;
	int old_restore = 0;
	int walk_trees = 0;
	int multi_devices = 0;
	int ret;
	int sanitize = 0;
	int dev_cnt = 0;
	int usage_error = 0;
	FILE *out;
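	/*
	 * -r and -m both switch from create to restore mode; -c, -s and -w
	 * only make sense when creating an image and are rejected for
	 * restores below.
	 */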
	while (1) {
		int c = getopt(argc, argv, "rc:t:oswm");
		if (c < 0)
			break;
		switch (c) {
		case 'r':
			create = 0;
			break;
		case 't':
			num_threads = arg_strtou64(optarg);
			if (num_threads > 32)
				print_usage();
			break;
		case 'c':
			compress_level = arg_strtou64(optarg);
			if (compress_level > 9)
				print_usage();
			break;
		case 'o':
			old_restore = 1;
			break;
		case 's':
			sanitize++;
			break;
		case 'w':
			walk_trees = 1;
			break;
		case 'm':
			create = 0;
			multi_devices = 1;
			break;
		default:
			print_usage();
		}
	}
	argc = argc - optind;
	set_argv0(argv);
	if (check_argc_min(argc, 2))
		print_usage();

	dev_cnt = argc - 1;

	if (create) {
		if (old_restore) {
			fprintf(stderr,
				"Usage error: create and restore cannot be used at the same time\n");
			usage_error++;
		}
	} else {
		if (walk_trees || sanitize || compress_level) {
			fprintf(stderr,
				"Usage error: the -w, -s and -c options make no sense for restore\n");
			usage_error++;
		}
		if (multi_devices && dev_cnt < 2) {
			fprintf(stderr,
				"Usage error: not enough devices specified for -m option\n");
			usage_error++;
		}
		if (!multi_devices && dev_cnt != 1) {
			fprintf(stderr,
				"Usage error: accepts only 1 device without -m option\n");
			usage_error++;
		}
	}

	if (usage_error)
		print_usage();
	source = argv[optind];
	target = argv[optind + 1];

	if (create && !strcmp(target, "-")) {
		out = stdout;
	} else {
		out = fopen(target, "w+");
		if (!out) {
			perror("unable to create target file");
			exit(1);
		}
	}
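	/*
	 * Worker threads only help when the output is compressed; default
	 * to one thread per online CPU in that case.
	 */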
	if (num_threads == 0 && compress_level > 0) {
		/* sysconf() may return -1, and num_threads is unsigned */
		long online_cpus = sysconf(_SC_NPROCESSORS_ONLN);

		num_threads = online_cpus > 0 ? online_cpus : 1;
	}
	if (create) {
		ret = check_mounted(source);
		if (ret < 0) {
			fprintf(stderr, "Could not check mount status: %s\n",
				strerror(-ret));
			exit(1);
		} else if (ret)
			fprintf(stderr,
				"WARNING: The device is mounted. Make sure the filesystem is quiescent.\n");

		ret = create_metadump(source, out, num_threads,
				      compress_level, sanitize, walk_trees);
	} else {
		ret = restore_metadump(source, out, old_restore, 1,
				       multi_devices);
	}
	if (ret) {
		printk("%s failed (%s)\n", (create) ? "create" : "restore",
		       strerror(errno));
		goto out;
	}
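	/*
	 * A multi-device restore happens in three steps: restore everything
	 * onto the first device (done above), stamp a patched super block
	 * onto each remaining device, then replay the image once more via
	 * fixup_metadump() to remap metadata onto the real chunk layout.
	 */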
	/* extended support for multiple devices */
	if (!create && multi_devices) {
		struct btrfs_fs_info *info;
		u64 total_devs;
		int i;

		info = open_ctree_fs_info(target, 0, 0,
					  OPEN_CTREE_PARTIAL |
					  OPEN_CTREE_RESTORE);
		if (!info) {
			int e = errno;
			fprintf(stderr, "unable to open %s, error = %s\n",
				target, strerror(e));
			return 1;
		}

		total_devs = btrfs_super_num_devices(info->super_copy);
		if (total_devs != dev_cnt) {
			printk("the image needs %llu devices but only %d were given\n",
			       total_devs, dev_cnt);
			close_ctree(info->chunk_root);
			ret = 1;
			goto out;
		}

		/* update the super block on the other disks */
		for (i = 2; i <= dev_cnt; i++) {
			ret = update_disk_super_on_device(info,
					argv[optind + i], (u64)i);
			if (ret) {
				printk("update disk super failed, devid=%d (error=%d)\n",
				       i, ret);
				close_ctree(info->chunk_root);
				exit(1);
			}
		}

		close_ctree(info->chunk_root);

		/* fix metadata blocks to map to the correct chunks */
		ret = fixup_metadump(source, out, 1, target);
		if (ret) {
			fprintf(stderr, "fix metadump failed (error=%d)\n",
				ret);
			exit(1);
		}
	}
out:
	if (out == stdout) {
		fflush(out);
	} else {
		fclose(out);
		if (ret && create) {
			int unlink_ret;

			unlink_ret = unlink(target);
			if (unlink_ret)
				fprintf(stderr,
					"unlink output file failed: %s\n",
					strerror(errno));
		}
	}

	return !!ret;
}