[btrfs-progs-unstable.git] / btrfs-image.c
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#define _XOPEN_SOURCE 500
#define _GNU_SOURCE 1
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <dirent.h>
#include <zlib.h>
#include "kerncompat.h"
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "utils.h"
#include "version.h"

#define HEADER_MAGIC		0xbd5c25e27295668bULL
#define MAX_PENDING_SIZE	(256 * 1024)
#define BLOCK_SIZE		1024
#define BLOCK_MASK		(BLOCK_SIZE - 1)

#define COMPRESS_NONE		0
#define COMPRESS_ZLIB		1
struct meta_cluster_item {
	__le64 bytenr;
	__le32 size;
} __attribute__ ((__packed__));

struct meta_cluster_header {
	__le64 magic;
	__le64 bytenr;
	__le32 nritems;
	u8 compress;
} __attribute__ ((__packed__));

/* cluster header + index items + buffers */
struct meta_cluster {
	struct meta_cluster_header header;
	struct meta_cluster_item items[];
} __attribute__ ((__packed__));

#define ITEMS_PER_CLUSTER ((BLOCK_SIZE - sizeof(struct meta_cluster)) / \
			   sizeof(struct meta_cluster_item))
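/*
 * Image layout, as produced by write_buffers() below: the dump is a
 * sequence of clusters.  Each cluster begins with one BLOCK_SIZE block
 * holding the meta_cluster header plus up to ITEMS_PER_CLUSTER index
 * items, followed by the (optionally zlib-compressed) metadata buffers
 * the items describe, zero-padded out to the next BLOCK_SIZE boundary.
 */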
struct async_work {
	struct list_head list;
	struct list_head ordered;
	u64 start;
	u64 size;
	u8 *buffer;
	size_t bufsize;
};
struct metadump_struct {
	struct btrfs_root *root;
	FILE *out;

	struct meta_cluster *cluster;

	pthread_t *threads;
	size_t num_threads;
	pthread_mutex_t mutex;
	pthread_cond_t cond;

	struct list_head list;
	struct list_head ordered;
	size_t num_items;
	size_t num_ready;

	u64 pending_start;
	u64 pending_size;

	int compress_level;
	int done;
};
struct mdrestore_struct {
	FILE *in;
	FILE *out;

	pthread_t *threads;
	size_t num_threads;
	pthread_mutex_t mutex;
	pthread_cond_t cond;

	struct list_head list;
	size_t num_items;

	int compress_method;
	int done;
};
/* recompute the checksum stored in the first bytes of a metadata block */
static void csum_block(u8 *buf, size_t len)
{
	char result[BTRFS_CRC32_SIZE];
	u32 crc = ~(u32)0;
	crc = crc32c(crc, buf + BTRFS_CSUM_SIZE, len - BTRFS_CSUM_SIZE);
	btrfs_csum_final(crc, result);
	memcpy(buf, result, BTRFS_CRC32_SIZE);
}
/*
 * zero inline extents and csum items
 */
static void zero_items(u8 *dst, struct extent_buffer *src)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_item *item;
	struct btrfs_key key;
	u32 nritems = btrfs_header_nritems(src);
	size_t size;
	unsigned long ptr;
	int i, extent_type;

	for (i = 0; i < nritems; i++) {
		item = btrfs_item_nr(src, i);
		btrfs_item_key_to_cpu(src, &key, i);
		if (key.type == BTRFS_CSUM_ITEM_KEY) {
			size = btrfs_item_size_nr(src, i);
			memset(dst + btrfs_leaf_data(src) +
			       btrfs_item_offset_nr(src, i), 0, size);
			continue;
		}

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(src, fi);
		if (extent_type != BTRFS_FILE_EXTENT_INLINE)
			continue;

		ptr = btrfs_file_extent_inline_start(fi);
		size = btrfs_file_extent_inline_item_len(src, item);
		memset(dst + ptr, 0, size);
	}
}
/*
 * copy buffer and zero useless data in the buffer
 */
static void copy_buffer(u8 *dst, struct extent_buffer *src)
{
	int level;
	size_t size;
	u32 nritems;

	memcpy(dst, src->data, src->len);
	if (src->start == BTRFS_SUPER_INFO_OFFSET)
		return;

	level = btrfs_header_level(src);
	nritems = btrfs_header_nritems(src);

	if (nritems == 0) {
		size = sizeof(struct btrfs_header);
		memset(dst + size, 0, src->len - size);
	} else if (level == 0) {
		size = btrfs_leaf_data(src) +
			btrfs_item_offset_nr(src, nritems - 1) -
			btrfs_item_nr_offset(nritems);
		memset(dst + btrfs_item_nr_offset(nritems), 0, size);
		zero_items(dst, src);
	} else {
		size = offsetof(struct btrfs_node, ptrs) +
			sizeof(struct btrfs_key_ptr) * nritems;
		memset(dst + size, 0, src->len - size);
	}
	csum_block(dst, src->len);
}
/* worker thread: compress queued buffers when compression is enabled */
static void *dump_worker(void *data)
{
	struct metadump_struct *md = (struct metadump_struct *)data;
	struct async_work *async;
	int ret;

	while (1) {
		pthread_mutex_lock(&md->mutex);
		while (list_empty(&md->list)) {
			if (md->done) {
				pthread_mutex_unlock(&md->mutex);
				goto out;
			}
			pthread_cond_wait(&md->cond, &md->mutex);
		}
		async = list_entry(md->list.next, struct async_work, list);
		list_del_init(&async->list);
		pthread_mutex_unlock(&md->mutex);

		if (md->compress_level > 0) {
			u8 *orig = async->buffer;

			async->bufsize = compressBound(async->size);
			async->buffer = malloc(async->bufsize);

			ret = compress2(async->buffer,
					(unsigned long *)&async->bufsize,
					orig, async->size, md->compress_level);
			BUG_ON(ret != Z_OK);

			free(orig);
		}

		pthread_mutex_lock(&md->mutex);
		md->num_ready++;
		pthread_mutex_unlock(&md->mutex);
	}
out:
	pthread_exit(NULL);
}
static void meta_cluster_init(struct metadump_struct *md, u64 start)
{
	struct meta_cluster_header *header;

	md->num_items = 0;
	md->num_ready = 0;
	header = &md->cluster->header;
	header->magic = cpu_to_le64(HEADER_MAGIC);
	header->bytenr = cpu_to_le64(start);
	header->nritems = cpu_to_le32(0);
	header->compress = md->compress_level > 0 ?
			   COMPRESS_ZLIB : COMPRESS_NONE;
}
static int metadump_init(struct metadump_struct *md, struct btrfs_root *root,
			 FILE *out, int num_threads, int compress_level)
{
	int i, ret;

	memset(md, 0, sizeof(*md));
	pthread_cond_init(&md->cond, NULL);
	pthread_mutex_init(&md->mutex, NULL);
	INIT_LIST_HEAD(&md->list);
	INIT_LIST_HEAD(&md->ordered);
	md->root = root;
	md->out = out;
	md->pending_start = (u64)-1;
	md->compress_level = compress_level;
	md->cluster = calloc(1, BLOCK_SIZE);
	if (!md->cluster)
		return -ENOMEM;

	meta_cluster_init(md, 0);
	if (!num_threads)
		return 0;

	md->num_threads = num_threads;
	md->threads = calloc(num_threads, sizeof(pthread_t));
	if (!md->threads)
		return -ENOMEM;
	for (i = 0; i < num_threads; i++) {
		ret = pthread_create(md->threads + i, NULL, dump_worker, md);
		if (ret)
			break;
	}
	return ret;
}
static void metadump_destroy(struct metadump_struct *md)
{
	int i;

	pthread_mutex_lock(&md->mutex);
	md->done = 1;
	pthread_cond_broadcast(&md->cond);
	pthread_mutex_unlock(&md->mutex);

	for (i = 0; i < md->num_threads; i++)
		pthread_join(md->threads[i], NULL);

	pthread_cond_destroy(&md->cond);
	pthread_mutex_destroy(&md->mutex);
	free(md->threads);
	free(md->cluster);
}
static int write_zero(FILE *out, size_t size)
{
	static char zero[BLOCK_SIZE];
	return fwrite(zero, size, 1, out);
}
static int write_buffers(struct metadump_struct *md, u64 *next)
{
	struct meta_cluster_header *header = &md->cluster->header;
	struct meta_cluster_item *item;
	struct async_work *async;
	u64 bytenr = 0;
	u32 nritems = 0;
	int ret;

	if (list_empty(&md->ordered))
		goto out;

	/* wait until all buffers are compressed */
	while (md->num_items > md->num_ready) {
		struct timespec ts = {
			.tv_sec = 0,
			.tv_nsec = 10000000,
		};
		pthread_mutex_unlock(&md->mutex);
		nanosleep(&ts, NULL);
		pthread_mutex_lock(&md->mutex);
	}

	/* setup and write index block */
	list_for_each_entry(async, &md->ordered, ordered) {
		item = md->cluster->items + nritems;
		item->bytenr = cpu_to_le64(async->start);
		item->size = cpu_to_le32(async->bufsize);
		nritems++;
	}
	header->nritems = cpu_to_le32(nritems);

	ret = fwrite(md->cluster, BLOCK_SIZE, 1, md->out);
	BUG_ON(ret != 1);

	/* write buffers */
	bytenr += le64_to_cpu(header->bytenr) + BLOCK_SIZE;
	while (!list_empty(&md->ordered)) {
		async = list_entry(md->ordered.next, struct async_work,
				   ordered);
		list_del_init(&async->ordered);

		bytenr += async->bufsize;
		ret = fwrite(async->buffer, async->bufsize, 1, md->out);
		BUG_ON(ret != 1);

		free(async->buffer);
		free(async);
	}

	/* zero unused space in the last block */
	if (bytenr & BLOCK_MASK) {
		size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);

		bytenr += size;
		ret = write_zero(md->out, size);
		BUG_ON(ret != 1);
	}
out:
	*next = bytenr;
	return 0;
}
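/*
 * Queue the contiguous range accumulated in md->pending_start/pending_size:
 * the blocks are read back, sanitized via copy_buffer(), and handed to the
 * worker threads (or counted ready directly when compression is off).
 * With @done set, the current cluster is flushed to the output as well.
 */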
static int flush_pending(struct metadump_struct *md, int done)
{
	struct async_work *async = NULL;
	struct extent_buffer *eb;
	u64 blocksize = md->root->nodesize;
	u64 start;
	u64 size;
	size_t offset;
	int ret;

	if (md->pending_size) {
		async = calloc(1, sizeof(*async));
		if (!async)
			return -ENOMEM;

		async->start = md->pending_start;
		async->size = md->pending_size;
		async->bufsize = async->size;
		async->buffer = malloc(async->bufsize);

		offset = 0;
		start = async->start;
		size = async->size;
		while (size > 0) {
			eb = read_tree_block(md->root, start, blocksize, 0);
			BUG_ON(!eb);
			copy_buffer(async->buffer + offset, eb);
			free_extent_buffer(eb);
			start += blocksize;
			offset += blocksize;
			size -= blocksize;
		}

		md->pending_start = (u64)-1;
		md->pending_size = 0;
	} else if (!done) {
		return 0;
	}

	pthread_mutex_lock(&md->mutex);
	if (async) {
		list_add_tail(&async->ordered, &md->ordered);
		md->num_items++;
		if (md->compress_level > 0) {
			list_add_tail(&async->list, &md->list);
			pthread_cond_signal(&md->cond);
		} else {
			md->num_ready++;
		}
	}
	if (md->num_items >= ITEMS_PER_CLUSTER || done) {
		ret = write_buffers(md, &start);
		BUG_ON(ret);
		meta_cluster_init(md, start);
	}
	pthread_mutex_unlock(&md->mutex);
	return 0;
}
static int add_metadata(u64 start, u64 size, struct metadump_struct *md)
{
	int ret;
	if (md->pending_size + size > MAX_PENDING_SIZE ||
	    md->pending_start + md->pending_size != start) {
		ret = flush_pending(md, 0);
		if (ret)
			return ret;
		md->pending_start = start;
	}
	readahead_tree_block(md->root, start, size, 0);
	md->pending_size += size;
	return 0;
}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int is_tree_block(struct btrfs_root *extent_root,
			 struct btrfs_path *path, u64 bytenr)
{
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 ref_objectid;
	int ret;

	leaf = path->nodes[0];
	while (1) {
		struct btrfs_extent_ref_v0 *ref_item;
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(extent_root, path);
			BUG_ON(ret < 0);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr)
			break;
		if (key.type != BTRFS_EXTENT_REF_V0_KEY)
			continue;
		ref_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_extent_ref_v0);
		ref_objectid = btrfs_ref_objectid_v0(leaf, ref_item);
		if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID)
			return 1;
		break;
	}
	return 0;
}
#endif
static int create_metadump(const char *input, FILE *out, int num_threads,
			   int compress_level)
{
	struct btrfs_root *root;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	struct metadump_struct metadump;
	u64 bytenr;
	u64 num_bytes;
	int ret;

	root = open_ctree(input, 0, 0);
	BUG_ON(root->nodesize != root->leafsize);

	ret = metadump_init(&metadump, root, out, num_threads,
			    compress_level);
	BUG_ON(ret);

	ret = add_metadata(BTRFS_SUPER_INFO_OFFSET, 4096, &metadump);
	BUG_ON(ret);

	extent_root = root->fs_info->extent_root;
	path = btrfs_alloc_path();

	bytenr = BTRFS_SUPER_INFO_OFFSET + 4096;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	BUG_ON(ret < 0);

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(extent_root, path);
			BUG_ON(ret < 0);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid < bytenr ||
		    key.type != BTRFS_EXTENT_ITEM_KEY) {
			path->slots[0]++;
			continue;
		}

		bytenr = key.objectid;
		num_bytes = key.offset;

		if (btrfs_item_size_nr(leaf, path->slots[0]) > sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			if (btrfs_extent_flags(leaf, ei) &
			    BTRFS_EXTENT_FLAG_TREE_BLOCK) {
				ret = add_metadata(bytenr, num_bytes,
						   &metadump);
				BUG_ON(ret);
			}
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			if (is_tree_block(extent_root, path, bytenr)) {
				ret = add_metadata(bytenr, num_bytes,
						   &metadump);
				BUG_ON(ret);
			}
#else
			BUG_ON(1);
#endif
		}
		bytenr += num_bytes;
	}

	ret = flush_pending(&metadump, 1);
	BUG_ON(ret);

	metadump_destroy(&metadump);

	btrfs_free_path(path);
	ret = close_ctree(root);
	return ret;
}
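/*
 * The restored image lands on a plain file or device with no chunk
 * mapping of its own, so the superblock is patched before being written:
 * the METADUMP flag is set and the sys_chunk_array is replaced with a
 * single identity-mapped SYSTEM chunk (one stripe, offset 0, length -1)
 * covering the whole address space.
 */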
static void update_super(u8 *buffer)
{
	struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
	struct btrfs_chunk *chunk;
	struct btrfs_disk_key *key;
	u32 sectorsize = btrfs_super_sectorsize(super);
	u64 flags = btrfs_super_flags(super);

	flags |= BTRFS_SUPER_FLAG_METADUMP;
	btrfs_set_super_flags(super, flags);

	key = (struct btrfs_disk_key *)(super->sys_chunk_array);
	chunk = (struct btrfs_chunk *)(super->sys_chunk_array +
				       sizeof(struct btrfs_disk_key));

	btrfs_set_disk_key_objectid(key, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_disk_key_type(key, BTRFS_CHUNK_ITEM_KEY);
	btrfs_set_disk_key_offset(key, 0);

	btrfs_set_stack_chunk_length(chunk, (u64)-1);
	btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
	btrfs_set_stack_chunk_stripe_len(chunk, 64 * 1024);
	btrfs_set_stack_chunk_type(chunk, BTRFS_BLOCK_GROUP_SYSTEM);
	btrfs_set_stack_chunk_io_align(chunk, sectorsize);
	btrfs_set_stack_chunk_io_width(chunk, sectorsize);
	btrfs_set_stack_chunk_sector_size(chunk, sectorsize);
	btrfs_set_stack_chunk_num_stripes(chunk, 1);
	btrfs_set_stack_chunk_sub_stripes(chunk, 0);
	chunk->stripe.devid = super->dev_item.devid;
	chunk->stripe.offset = cpu_to_le64(0);
	memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
	btrfs_set_super_sys_array_size(super, sizeof(*key) + sizeof(*chunk));
	csum_block(buffer, 4096);
}
/* worker thread: decompress queued buffers and write them to the target */
static void *restore_worker(void *data)
{
	struct mdrestore_struct *mdres = (struct mdrestore_struct *)data;
	struct async_work *async;
	size_t size;
	u8 *buffer;
	u8 *outbuf;
	int outfd;
	int ret;

	outfd = fileno(mdres->out);
	buffer = malloc(MAX_PENDING_SIZE * 2);
	BUG_ON(!buffer);

	while (1) {
		pthread_mutex_lock(&mdres->mutex);
		while (list_empty(&mdres->list)) {
			if (mdres->done) {
				pthread_mutex_unlock(&mdres->mutex);
				goto out;
			}
			pthread_cond_wait(&mdres->cond, &mdres->mutex);
		}
		async = list_entry(mdres->list.next, struct async_work, list);
		list_del_init(&async->list);
		pthread_mutex_unlock(&mdres->mutex);

		if (mdres->compress_method == COMPRESS_ZLIB) {
			size = MAX_PENDING_SIZE * 2;
			ret = uncompress(buffer, (unsigned long *)&size,
					 async->buffer, async->bufsize);
			BUG_ON(ret != Z_OK);
			outbuf = buffer;
		} else {
			outbuf = async->buffer;
			size = async->bufsize;
		}

		if (async->start == BTRFS_SUPER_INFO_OFFSET)
			update_super(outbuf);

		ret = pwrite64(outfd, outbuf, size, async->start);
		BUG_ON(ret != size);

		pthread_mutex_lock(&mdres->mutex);
		mdres->num_items--;
		pthread_mutex_unlock(&mdres->mutex);

		free(async->buffer);
		free(async);
	}
out:
	free(buffer);
	pthread_exit(NULL);
}
static int mdrestore_init(struct mdrestore_struct *mdres,
			  FILE *in, FILE *out, int num_threads)
{
	int i, ret = 0;

	memset(mdres, 0, sizeof(*mdres));
	pthread_cond_init(&mdres->cond, NULL);
	pthread_mutex_init(&mdres->mutex, NULL);
	INIT_LIST_HEAD(&mdres->list);
	mdres->in = in;
	mdres->out = out;

	if (!num_threads)
		return 0;

	mdres->num_threads = num_threads;
	mdres->threads = calloc(num_threads, sizeof(pthread_t));
	if (!mdres->threads)
		return -ENOMEM;
	for (i = 0; i < num_threads; i++) {
		ret = pthread_create(mdres->threads + i, NULL, restore_worker,
				     mdres);
		if (ret)
			break;
	}
	return ret;
}
static void mdrestore_destroy(struct mdrestore_struct *mdres)
{
	int i;

	pthread_mutex_lock(&mdres->mutex);
	mdres->done = 1;
	pthread_cond_broadcast(&mdres->cond);
	pthread_mutex_unlock(&mdres->mutex);

	for (i = 0; i < mdres->num_threads; i++)
		pthread_join(mdres->threads[i], NULL);

	pthread_cond_destroy(&mdres->cond);
	pthread_mutex_destroy(&mdres->mutex);
	free(mdres->threads);
}
static int add_cluster(struct meta_cluster *cluster,
		       struct mdrestore_struct *mdres, u64 *next)
{
	struct meta_cluster_item *item;
	struct meta_cluster_header *header = &cluster->header;
	struct async_work *async;
	u64 bytenr;
	u32 i, nritems;
	int ret;

	BUG_ON(mdres->num_items);
	mdres->compress_method = header->compress;

	bytenr = le64_to_cpu(header->bytenr) + BLOCK_SIZE;
	nritems = le32_to_cpu(header->nritems);
	for (i = 0; i < nritems; i++) {
		item = &cluster->items[i];
		async = calloc(1, sizeof(*async));
		async->start = le64_to_cpu(item->bytenr);
		async->bufsize = le32_to_cpu(item->size);
		async->buffer = malloc(async->bufsize);
		ret = fread(async->buffer, async->bufsize, 1, mdres->in);
		BUG_ON(ret != 1);
		bytenr += async->bufsize;

		pthread_mutex_lock(&mdres->mutex);
		list_add_tail(&async->list, &mdres->list);
		mdres->num_items++;
		pthread_cond_signal(&mdres->cond);
		pthread_mutex_unlock(&mdres->mutex);
	}
	if (bytenr & BLOCK_MASK) {
		char buffer[BLOCK_MASK];
		size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);

		bytenr += size;
		ret = fread(buffer, size, 1, mdres->in);
		BUG_ON(ret != 1);
	}
	*next = bytenr;
	return 0;
}
static int wait_for_worker(struct mdrestore_struct *mdres)
{
	pthread_mutex_lock(&mdres->mutex);
	while (mdres->num_items > 0) {
		struct timespec ts = {
			.tv_sec = 0,
			.tv_nsec = 10000000,
		};
		pthread_mutex_unlock(&mdres->mutex);
		nanosleep(&ts, NULL);
		pthread_mutex_lock(&mdres->mutex);
	}
	pthread_mutex_unlock(&mdres->mutex);
	return 0;
}
static int restore_metadump(const char *input, FILE *out, int num_threads)
{
	struct meta_cluster *cluster;
	struct meta_cluster_header *header;
	struct mdrestore_struct mdrestore;
	u64 bytenr = 0;
	FILE *in;
	int ret;

	if (!strcmp(input, "-")) {
		in = stdin;
	} else {
		in = fopen(input, "r");
		if (!in) {
			perror("unable to open metadump image");
			return 1;
		}
	}

	cluster = malloc(BLOCK_SIZE);
	BUG_ON(!cluster);

	ret = mdrestore_init(&mdrestore, in, out, num_threads);
	BUG_ON(ret);

	while (1) {
		ret = fread(cluster, BLOCK_SIZE, 1, in);
		if (!ret)
			break;

		header = &cluster->header;
		if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
		    le64_to_cpu(header->bytenr) != bytenr) {
			fprintf(stderr, "bad header in metadump image\n");
			return 1;
		}
		ret = add_cluster(cluster, &mdrestore, &bytenr);
		BUG_ON(ret);

		wait_for_worker(&mdrestore);
	}

	mdrestore_destroy(&mdrestore);
	free(cluster);
	if (in != stdin)
		fclose(in);
	return ret;
}
static void print_usage(void)
{
	fprintf(stderr, "usage: btrfs-image [options] source target\n");
	fprintf(stderr, "\t-r      \trestore metadump image\n");
	fprintf(stderr, "\t-c value\tcompression level (0 ~ 9)\n");
	fprintf(stderr, "\t-t value\tnumber of threads (1 ~ 32)\n");
	exit(1);
}
int main(int argc, char *argv[])
{
	char *source;
	char *target;
	int num_threads = 0;
	int compress_level = 0;
	int create = 1;
	int ret;
	FILE *out;

	while (1) {
		int c = getopt(argc, argv, "rc:t:");
		if (c < 0)
			break;
		switch (c) {
		case 'r':
			create = 0;
			break;
		case 't':
			num_threads = atoi(optarg);
			if (num_threads <= 0 || num_threads > 32)
				print_usage();
			break;
		case 'c':
			compress_level = atoi(optarg);
			if (compress_level < 0 || compress_level > 9)
				print_usage();
			break;
		default:
			print_usage();
		}
	}

	argc = argc - optind;
	if (argc != 2)
		print_usage();
	source = argv[optind];
	target = argv[optind + 1];

	if (create && !strcmp(target, "-")) {
		out = stdout;
	} else {
		out = fopen(target, "w+");
		if (!out) {
			perror("unable to create target file");
			exit(1);
		}
	}

	if (num_threads == 0 && compress_level > 0) {
		num_threads = sysconf(_SC_NPROCESSORS_ONLN);
		if (num_threads <= 0)
			num_threads = 1;
	}

	if (create)
		ret = create_metadump(source, out, num_threads,
				      compress_level);
	else
		ret = restore_metadump(source, out, 1);

	if (out == stdout)
		fflush(out);
	else
		fclose(out);

	exit(ret);
}
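/*
 * Example invocations (illustrative; the device names and image path
 * are placeholders):
 *
 *	# dump metadata from /dev/sdb into img.md, zlib level 9, 4 threads
 *	btrfs-image -c 9 -t 4 /dev/sdb img.md
 *
 *	# restore the image onto another device
 *	btrfs-image -r img.md /dev/sdc
 */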