/*
 * Copyright (C) STRATO AG 2011. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

/*
 * This module can be used to catch cases when the btrfs kernel
 * code executes write requests to the disk that bring the file
 * system in an inconsistent state. In such a state, a power-loss
 * or kernel panic event would cause that the data on disk is
 * lost or at least damaged.
 *
 * Code is added that examines all block write requests during
 * runtime (including writes of the super block). Three rules
 * are verified and an error is printed on violation of the
 * rules:
 * 1. It is not allowed to write a disk block which is
 *    currently referenced by the super block (either directly
 *    or indirectly).
 * 2. When a super block is written, it is verified that all
 *    referenced (directly or indirectly) blocks fulfill the
 *    following requirements:
 *    2a. All referenced blocks have either been present when
 *        the file system was mounted, (i.e., they have been
 *        referenced by the super block) or they have been
 *        written since then and the write completion callback
 *        was called and a FLUSH request to the device where
 *        these blocks are located was received and completed.
 *    2b. All referenced blocks need to have a generation
 *        number which is equal to the parent's number.
 *
 * One issue that was found using this module was that the log
 * tree on disk became temporarily corrupted because disk blocks
 * that had been in use for the log tree had been freed and
 * reused too early, while being referenced by the written super
 * block.
 *
 * The search term in the kernel log that can be used to filter
 * on the existence of detected integrity issues is
 * "btrfs: attempt".
 *
 * The integrity check is enabled via mount options. These
 * mount options are only supported if the integrity check
 * tool is compiled by defining BTRFS_FS_CHECK_INTEGRITY.
 *
 * Example #1, apply integrity checks to all metadata:
 * mount /dev/sdb1 /mnt -o check_int
 *
 * Example #2, apply integrity checks to all metadata and
 * to data extents:
 * mount /dev/sdb1 /mnt -o check_int_data
 *
 * Example #3, apply integrity checks to all metadata and dump
 * the tree that the super block references to kernel messages
 * each time after a super block was written:
 * mount /dev/sdb1 /mnt -o check_int,check_int_print_mask=263
 *
 * If the integrity check tool is included and activated in
 * the mount options, plenty of kernel memory is used, and
 * plenty of additional CPU cycles are spent. Enabling this
 * functionality is not intended for normal use. In most
 * cases, unless you are a btrfs developer who needs to verify
 * the integrity of (super)-block write requests, do not
 * enable the config option BTRFS_FS_CHECK_INTEGRITY to
 * include and compile the integrity check tool.
 */
81 #include <linux/sched.h>
82 #include <linux/slab.h>
83 #include <linux/buffer_head.h>
84 #include <linux/mutex.h>
85 #include <linux/crc32c.h>
86 #include <linux/genhd.h>
87 #include <linux/blkdev.h>
90 #include "transaction.h"
91 #include "extent_io.h"
93 #include "print-tree.h"
95 #include "check-integrity.h"
/* Sizes of the collision-resolving hashtables (must be powers of two). */
#define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000
#define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000
#define BTRFSIC_DEV2STATE_HASHTABLE_SIZE 0x100

/* Magic numbers stored in the tracked objects, only used for debugging. */
#define BTRFSIC_BLOCK_MAGIC_NUMBER 0x14491051
#define BTRFSIC_BLOCK_LINK_MAGIC_NUMBER 0x11070807
#define BTRFSIC_DEV2STATE_MAGIC_NUMBER 0x20111530
#define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300

#define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6)	/* in characters,
							 * excluding " [...]" */
#define BTRFSIC_GENERATION_UNKNOWN ((u64)-1)

/*
 * The definition of the bitmask fields for the print_mask.
 * They are specified with the mount option check_integrity_print_mask.
 */
#define BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE			0x00000001
#define BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION	0x00000002
#define BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE			0x00000004
#define BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE		0x00000008
#define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH			0x00000010
#define BTRFSIC_PRINT_MASK_END_IO_BIO_BH			0x00000020
#define BTRFSIC_PRINT_MASK_VERBOSE				0x00000040
#define BTRFSIC_PRINT_MASK_VERY_VERBOSE			0x00000080
#define BTRFSIC_PRINT_MASK_INITIAL_TREE			0x00000100
#define BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES			0x00000200
#define BTRFSIC_PRINT_MASK_INITIAL_DATABASE			0x00000400
#define BTRFSIC_PRINT_MASK_NUM_COPIES				0x00000800
#define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS		0x00001000
126 struct btrfsic_dev_state
;
127 struct btrfsic_state
;
129 struct btrfsic_block
{
130 u32 magic_num
; /* only used for debug purposes */
131 unsigned int is_metadata
:1; /* if it is meta-data, not data-data */
132 unsigned int is_superblock
:1; /* if it is one of the superblocks */
133 unsigned int is_iodone
:1; /* if is done by lower subsystem */
134 unsigned int iodone_w_error
:1; /* error was indicated to endio */
135 unsigned int never_written
:1; /* block was added because it was
136 * referenced, not because it was
138 unsigned int mirror_num
:2; /* large enough to hold
139 * BTRFS_SUPER_MIRROR_MAX */
140 struct btrfsic_dev_state
*dev_state
;
141 u64 dev_bytenr
; /* key, physical byte num on disk */
142 u64 logical_bytenr
; /* logical byte num on disk */
144 struct btrfs_disk_key disk_key
; /* extra info to print in case of
145 * issues, will not always be correct */
146 struct list_head collision_resolving_node
; /* list node */
147 struct list_head all_blocks_node
; /* list node */
149 /* the following two lists contain block_link items */
150 struct list_head ref_to_list
; /* list */
151 struct list_head ref_from_list
; /* list */
152 struct btrfsic_block
*next_in_same_bio
;
153 void *orig_bio_bh_private
;
157 } orig_bio_bh_end_io
;
158 int submit_bio_bh_rw
;
159 u64 flush_gen
; /* only valid if !never_written */
163 * Elements of this type are allocated dynamically and required because
164 * each block object can refer to and can be ref from multiple blocks.
165 * The key to lookup them in the hashtable is the dev_bytenr of
166 * the block ref to plus the one from the block refered from.
167 * The fact that they are searchable via a hashtable and that a
168 * ref_cnt is maintained is not required for the btrfs integrity
169 * check algorithm itself, it is only used to make the output more
170 * beautiful in case that an error is detected (an error is defined
171 * as a write operation to a block while that block is still referenced).
173 struct btrfsic_block_link
{
174 u32 magic_num
; /* only used for debug purposes */
176 struct list_head node_ref_to
; /* list node */
177 struct list_head node_ref_from
; /* list node */
178 struct list_head collision_resolving_node
; /* list node */
179 struct btrfsic_block
*block_ref_to
;
180 struct btrfsic_block
*block_ref_from
;
181 u64 parent_generation
;
184 struct btrfsic_dev_state
{
185 u32 magic_num
; /* only used for debug purposes */
186 struct block_device
*bdev
;
187 struct btrfsic_state
*state
;
188 struct list_head collision_resolving_node
; /* list node */
189 struct btrfsic_block dummy_block_for_bio_bh_flush
;
191 char name
[BDEVNAME_SIZE
];
194 struct btrfsic_block_hashtable
{
195 struct list_head table
[BTRFSIC_BLOCK_HASHTABLE_SIZE
];
198 struct btrfsic_block_link_hashtable
{
199 struct list_head table
[BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE
];
202 struct btrfsic_dev_state_hashtable
{
203 struct list_head table
[BTRFSIC_DEV2STATE_HASHTABLE_SIZE
];
206 struct btrfsic_block_data_ctx
{
207 u64 start
; /* virtual bytenr */
208 u64 dev_bytenr
; /* physical bytenr on device */
210 struct btrfsic_dev_state
*dev
;
216 /* This structure is used to implement recursion without occupying
217 * any stack space, refer to btrfsic_process_metablock() */
218 struct btrfsic_stack_frame
{
226 struct btrfsic_block
*block
;
227 struct btrfsic_block_data_ctx
*block_ctx
;
228 struct btrfsic_block
*next_block
;
229 struct btrfsic_block_data_ctx next_block_ctx
;
230 struct btrfs_header
*hdr
;
231 struct btrfsic_stack_frame
*prev
;
234 /* Some state per mounted filesystem */
235 struct btrfsic_state
{
237 int include_extent_data
;
239 struct list_head all_blocks_list
;
240 struct btrfsic_block_hashtable block_hashtable
;
241 struct btrfsic_block_link_hashtable block_link_hashtable
;
242 struct btrfs_root
*root
;
243 u64 max_superblock_generation
;
244 struct btrfsic_block
*latest_superblock
;
249 static void btrfsic_block_init(struct btrfsic_block
*b
);
250 static struct btrfsic_block
*btrfsic_block_alloc(void);
251 static void btrfsic_block_free(struct btrfsic_block
*b
);
252 static void btrfsic_block_link_init(struct btrfsic_block_link
*n
);
253 static struct btrfsic_block_link
*btrfsic_block_link_alloc(void);
254 static void btrfsic_block_link_free(struct btrfsic_block_link
*n
);
255 static void btrfsic_dev_state_init(struct btrfsic_dev_state
*ds
);
256 static struct btrfsic_dev_state
*btrfsic_dev_state_alloc(void);
257 static void btrfsic_dev_state_free(struct btrfsic_dev_state
*ds
);
258 static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable
*h
);
259 static void btrfsic_block_hashtable_add(struct btrfsic_block
*b
,
260 struct btrfsic_block_hashtable
*h
);
261 static void btrfsic_block_hashtable_remove(struct btrfsic_block
*b
);
262 static struct btrfsic_block
*btrfsic_block_hashtable_lookup(
263 struct block_device
*bdev
,
265 struct btrfsic_block_hashtable
*h
);
266 static void btrfsic_block_link_hashtable_init(
267 struct btrfsic_block_link_hashtable
*h
);
268 static void btrfsic_block_link_hashtable_add(
269 struct btrfsic_block_link
*l
,
270 struct btrfsic_block_link_hashtable
*h
);
271 static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link
*l
);
272 static struct btrfsic_block_link
*btrfsic_block_link_hashtable_lookup(
273 struct block_device
*bdev_ref_to
,
274 u64 dev_bytenr_ref_to
,
275 struct block_device
*bdev_ref_from
,
276 u64 dev_bytenr_ref_from
,
277 struct btrfsic_block_link_hashtable
*h
);
278 static void btrfsic_dev_state_hashtable_init(
279 struct btrfsic_dev_state_hashtable
*h
);
280 static void btrfsic_dev_state_hashtable_add(
281 struct btrfsic_dev_state
*ds
,
282 struct btrfsic_dev_state_hashtable
*h
);
283 static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state
*ds
);
284 static struct btrfsic_dev_state
*btrfsic_dev_state_hashtable_lookup(
285 struct block_device
*bdev
,
286 struct btrfsic_dev_state_hashtable
*h
);
287 static struct btrfsic_stack_frame
*btrfsic_stack_frame_alloc(void);
288 static void btrfsic_stack_frame_free(struct btrfsic_stack_frame
*sf
);
289 static int btrfsic_process_superblock(struct btrfsic_state
*state
,
290 struct btrfs_fs_devices
*fs_devices
);
291 static int btrfsic_process_metablock(struct btrfsic_state
*state
,
292 struct btrfsic_block
*block
,
293 struct btrfsic_block_data_ctx
*block_ctx
,
294 int limit_nesting
, int force_iodone_flag
);
295 static void btrfsic_read_from_block_data(
296 struct btrfsic_block_data_ctx
*block_ctx
,
297 void *dst
, u32 offset
, size_t len
);
298 static int btrfsic_create_link_to_next_block(
299 struct btrfsic_state
*state
,
300 struct btrfsic_block
*block
,
301 struct btrfsic_block_data_ctx
302 *block_ctx
, u64 next_bytenr
,
304 struct btrfsic_block_data_ctx
*next_block_ctx
,
305 struct btrfsic_block
**next_blockp
,
306 int force_iodone_flag
,
307 int *num_copiesp
, int *mirror_nump
,
308 struct btrfs_disk_key
*disk_key
,
309 u64 parent_generation
);
310 static int btrfsic_handle_extent_data(struct btrfsic_state
*state
,
311 struct btrfsic_block
*block
,
312 struct btrfsic_block_data_ctx
*block_ctx
,
313 u32 item_offset
, int force_iodone_flag
);
314 static int btrfsic_map_block(struct btrfsic_state
*state
, u64 bytenr
, u32 len
,
315 struct btrfsic_block_data_ctx
*block_ctx_out
,
317 static int btrfsic_map_superblock(struct btrfsic_state
*state
, u64 bytenr
,
318 u32 len
, struct block_device
*bdev
,
319 struct btrfsic_block_data_ctx
*block_ctx_out
);
320 static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx
*block_ctx
);
321 static int btrfsic_read_block(struct btrfsic_state
*state
,
322 struct btrfsic_block_data_ctx
*block_ctx
);
323 static void btrfsic_dump_database(struct btrfsic_state
*state
);
324 static void btrfsic_complete_bio_end_io(struct bio
*bio
, int err
);
325 static int btrfsic_test_for_metadata(struct btrfsic_state
*state
,
326 char **datav
, unsigned int num_pages
);
327 static void btrfsic_process_written_block(struct btrfsic_dev_state
*dev_state
,
328 u64 dev_bytenr
, char **mapped_datav
,
329 unsigned int num_pages
,
330 struct bio
*bio
, int *bio_is_patched
,
331 struct buffer_head
*bh
,
332 int submit_bio_bh_rw
);
333 static int btrfsic_process_written_superblock(
334 struct btrfsic_state
*state
,
335 struct btrfsic_block
*const block
,
336 struct btrfs_super_block
*const super_hdr
);
337 static void btrfsic_bio_end_io(struct bio
*bp
, int bio_error_status
);
338 static void btrfsic_bh_end_io(struct buffer_head
*bh
, int uptodate
);
339 static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state
*state
,
340 const struct btrfsic_block
*block
,
341 int recursion_level
);
342 static int btrfsic_check_all_ref_blocks(struct btrfsic_state
*state
,
343 struct btrfsic_block
*const block
,
344 int recursion_level
);
345 static void btrfsic_print_add_link(const struct btrfsic_state
*state
,
346 const struct btrfsic_block_link
*l
);
347 static void btrfsic_print_rem_link(const struct btrfsic_state
*state
,
348 const struct btrfsic_block_link
*l
);
349 static char btrfsic_get_block_type(const struct btrfsic_state
*state
,
350 const struct btrfsic_block
*block
);
351 static void btrfsic_dump_tree(const struct btrfsic_state
*state
);
352 static void btrfsic_dump_tree_sub(const struct btrfsic_state
*state
,
353 const struct btrfsic_block
*block
,
355 static struct btrfsic_block_link
*btrfsic_block_link_lookup_or_add(
356 struct btrfsic_state
*state
,
357 struct btrfsic_block_data_ctx
*next_block_ctx
,
358 struct btrfsic_block
*next_block
,
359 struct btrfsic_block
*from_block
,
360 u64 parent_generation
);
361 static struct btrfsic_block
*btrfsic_block_lookup_or_add(
362 struct btrfsic_state
*state
,
363 struct btrfsic_block_data_ctx
*block_ctx
,
364 const char *additional_string
,
370 static int btrfsic_process_superblock_dev_mirror(
371 struct btrfsic_state
*state
,
372 struct btrfsic_dev_state
*dev_state
,
373 struct btrfs_device
*device
,
374 int superblock_mirror_num
,
375 struct btrfsic_dev_state
**selected_dev_state
,
376 struct btrfs_super_block
*selected_super
);
377 static struct btrfsic_dev_state
*btrfsic_dev_state_lookup(
378 struct block_device
*bdev
);
379 static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state
*state
,
381 struct btrfsic_dev_state
*dev_state
,
384 static struct mutex btrfsic_mutex
;
385 static int btrfsic_is_initialized
;
386 static struct btrfsic_dev_state_hashtable btrfsic_dev_state_hashtable
;
389 static void btrfsic_block_init(struct btrfsic_block
*b
)
391 b
->magic_num
= BTRFSIC_BLOCK_MAGIC_NUMBER
;
394 b
->logical_bytenr
= 0;
395 b
->generation
= BTRFSIC_GENERATION_UNKNOWN
;
396 b
->disk_key
.objectid
= 0;
397 b
->disk_key
.type
= 0;
398 b
->disk_key
.offset
= 0;
400 b
->is_superblock
= 0;
402 b
->iodone_w_error
= 0;
403 b
->never_written
= 0;
405 b
->next_in_same_bio
= NULL
;
406 b
->orig_bio_bh_private
= NULL
;
407 b
->orig_bio_bh_end_io
.bio
= NULL
;
408 INIT_LIST_HEAD(&b
->collision_resolving_node
);
409 INIT_LIST_HEAD(&b
->all_blocks_node
);
410 INIT_LIST_HEAD(&b
->ref_to_list
);
411 INIT_LIST_HEAD(&b
->ref_from_list
);
412 b
->submit_bio_bh_rw
= 0;
416 static struct btrfsic_block
*btrfsic_block_alloc(void)
418 struct btrfsic_block
*b
;
420 b
= kzalloc(sizeof(*b
), GFP_NOFS
);
422 btrfsic_block_init(b
);
427 static void btrfsic_block_free(struct btrfsic_block
*b
)
429 BUG_ON(!(NULL
== b
|| BTRFSIC_BLOCK_MAGIC_NUMBER
== b
->magic_num
));
433 static void btrfsic_block_link_init(struct btrfsic_block_link
*l
)
435 l
->magic_num
= BTRFSIC_BLOCK_LINK_MAGIC_NUMBER
;
437 INIT_LIST_HEAD(&l
->node_ref_to
);
438 INIT_LIST_HEAD(&l
->node_ref_from
);
439 INIT_LIST_HEAD(&l
->collision_resolving_node
);
440 l
->block_ref_to
= NULL
;
441 l
->block_ref_from
= NULL
;
444 static struct btrfsic_block_link
*btrfsic_block_link_alloc(void)
446 struct btrfsic_block_link
*l
;
448 l
= kzalloc(sizeof(*l
), GFP_NOFS
);
450 btrfsic_block_link_init(l
);
455 static void btrfsic_block_link_free(struct btrfsic_block_link
*l
)
457 BUG_ON(!(NULL
== l
|| BTRFSIC_BLOCK_LINK_MAGIC_NUMBER
== l
->magic_num
));
461 static void btrfsic_dev_state_init(struct btrfsic_dev_state
*ds
)
463 ds
->magic_num
= BTRFSIC_DEV2STATE_MAGIC_NUMBER
;
467 INIT_LIST_HEAD(&ds
->collision_resolving_node
);
468 ds
->last_flush_gen
= 0;
469 btrfsic_block_init(&ds
->dummy_block_for_bio_bh_flush
);
470 ds
->dummy_block_for_bio_bh_flush
.is_iodone
= 1;
471 ds
->dummy_block_for_bio_bh_flush
.dev_state
= ds
;
474 static struct btrfsic_dev_state
*btrfsic_dev_state_alloc(void)
476 struct btrfsic_dev_state
*ds
;
478 ds
= kzalloc(sizeof(*ds
), GFP_NOFS
);
480 btrfsic_dev_state_init(ds
);
485 static void btrfsic_dev_state_free(struct btrfsic_dev_state
*ds
)
487 BUG_ON(!(NULL
== ds
||
488 BTRFSIC_DEV2STATE_MAGIC_NUMBER
== ds
->magic_num
));
492 static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable
*h
)
496 for (i
= 0; i
< BTRFSIC_BLOCK_HASHTABLE_SIZE
; i
++)
497 INIT_LIST_HEAD(h
->table
+ i
);
500 static void btrfsic_block_hashtable_add(struct btrfsic_block
*b
,
501 struct btrfsic_block_hashtable
*h
)
503 const unsigned int hashval
=
504 (((unsigned int)(b
->dev_bytenr
>> 16)) ^
505 ((unsigned int)((uintptr_t)b
->dev_state
->bdev
))) &
506 (BTRFSIC_BLOCK_HASHTABLE_SIZE
- 1);
508 list_add(&b
->collision_resolving_node
, h
->table
+ hashval
);
511 static void btrfsic_block_hashtable_remove(struct btrfsic_block
*b
)
513 list_del(&b
->collision_resolving_node
);
516 static struct btrfsic_block
*btrfsic_block_hashtable_lookup(
517 struct block_device
*bdev
,
519 struct btrfsic_block_hashtable
*h
)
521 const unsigned int hashval
=
522 (((unsigned int)(dev_bytenr
>> 16)) ^
523 ((unsigned int)((uintptr_t)bdev
))) &
524 (BTRFSIC_BLOCK_HASHTABLE_SIZE
- 1);
525 struct list_head
*elem
;
527 list_for_each(elem
, h
->table
+ hashval
) {
528 struct btrfsic_block
*const b
=
529 list_entry(elem
, struct btrfsic_block
,
530 collision_resolving_node
);
532 if (b
->dev_state
->bdev
== bdev
&& b
->dev_bytenr
== dev_bytenr
)
539 static void btrfsic_block_link_hashtable_init(
540 struct btrfsic_block_link_hashtable
*h
)
544 for (i
= 0; i
< BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE
; i
++)
545 INIT_LIST_HEAD(h
->table
+ i
);
548 static void btrfsic_block_link_hashtable_add(
549 struct btrfsic_block_link
*l
,
550 struct btrfsic_block_link_hashtable
*h
)
552 const unsigned int hashval
=
553 (((unsigned int)(l
->block_ref_to
->dev_bytenr
>> 16)) ^
554 ((unsigned int)(l
->block_ref_from
->dev_bytenr
>> 16)) ^
555 ((unsigned int)((uintptr_t)l
->block_ref_to
->dev_state
->bdev
)) ^
556 ((unsigned int)((uintptr_t)l
->block_ref_from
->dev_state
->bdev
)))
557 & (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE
- 1);
559 BUG_ON(NULL
== l
->block_ref_to
);
560 BUG_ON(NULL
== l
->block_ref_from
);
561 list_add(&l
->collision_resolving_node
, h
->table
+ hashval
);
564 static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link
*l
)
566 list_del(&l
->collision_resolving_node
);
569 static struct btrfsic_block_link
*btrfsic_block_link_hashtable_lookup(
570 struct block_device
*bdev_ref_to
,
571 u64 dev_bytenr_ref_to
,
572 struct block_device
*bdev_ref_from
,
573 u64 dev_bytenr_ref_from
,
574 struct btrfsic_block_link_hashtable
*h
)
576 const unsigned int hashval
=
577 (((unsigned int)(dev_bytenr_ref_to
>> 16)) ^
578 ((unsigned int)(dev_bytenr_ref_from
>> 16)) ^
579 ((unsigned int)((uintptr_t)bdev_ref_to
)) ^
580 ((unsigned int)((uintptr_t)bdev_ref_from
))) &
581 (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE
- 1);
582 struct list_head
*elem
;
584 list_for_each(elem
, h
->table
+ hashval
) {
585 struct btrfsic_block_link
*const l
=
586 list_entry(elem
, struct btrfsic_block_link
,
587 collision_resolving_node
);
589 BUG_ON(NULL
== l
->block_ref_to
);
590 BUG_ON(NULL
== l
->block_ref_from
);
591 if (l
->block_ref_to
->dev_state
->bdev
== bdev_ref_to
&&
592 l
->block_ref_to
->dev_bytenr
== dev_bytenr_ref_to
&&
593 l
->block_ref_from
->dev_state
->bdev
== bdev_ref_from
&&
594 l
->block_ref_from
->dev_bytenr
== dev_bytenr_ref_from
)
601 static void btrfsic_dev_state_hashtable_init(
602 struct btrfsic_dev_state_hashtable
*h
)
606 for (i
= 0; i
< BTRFSIC_DEV2STATE_HASHTABLE_SIZE
; i
++)
607 INIT_LIST_HEAD(h
->table
+ i
);
610 static void btrfsic_dev_state_hashtable_add(
611 struct btrfsic_dev_state
*ds
,
612 struct btrfsic_dev_state_hashtable
*h
)
614 const unsigned int hashval
=
615 (((unsigned int)((uintptr_t)ds
->bdev
)) &
616 (BTRFSIC_DEV2STATE_HASHTABLE_SIZE
- 1));
618 list_add(&ds
->collision_resolving_node
, h
->table
+ hashval
);
621 static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state
*ds
)
623 list_del(&ds
->collision_resolving_node
);
626 static struct btrfsic_dev_state
*btrfsic_dev_state_hashtable_lookup(
627 struct block_device
*bdev
,
628 struct btrfsic_dev_state_hashtable
*h
)
630 const unsigned int hashval
=
631 (((unsigned int)((uintptr_t)bdev
)) &
632 (BTRFSIC_DEV2STATE_HASHTABLE_SIZE
- 1));
633 struct list_head
*elem
;
635 list_for_each(elem
, h
->table
+ hashval
) {
636 struct btrfsic_dev_state
*const ds
=
637 list_entry(elem
, struct btrfsic_dev_state
,
638 collision_resolving_node
);
640 if (ds
->bdev
== bdev
)
647 static int btrfsic_process_superblock(struct btrfsic_state
*state
,
648 struct btrfs_fs_devices
*fs_devices
)
651 struct btrfs_super_block
*selected_super
;
652 struct list_head
*dev_head
= &fs_devices
->devices
;
653 struct btrfs_device
*device
;
654 struct btrfsic_dev_state
*selected_dev_state
= NULL
;
657 BUG_ON(NULL
== state
);
658 selected_super
= kzalloc(sizeof(*selected_super
), GFP_NOFS
);
659 if (NULL
== selected_super
) {
660 printk(KERN_INFO
"btrfsic: error, kmalloc failed!\n");
664 list_for_each_entry(device
, dev_head
, dev_list
) {
666 struct btrfsic_dev_state
*dev_state
;
668 if (!device
->bdev
|| !device
->name
)
671 dev_state
= btrfsic_dev_state_lookup(device
->bdev
);
672 BUG_ON(NULL
== dev_state
);
673 for (i
= 0; i
< BTRFS_SUPER_MIRROR_MAX
; i
++) {
674 ret
= btrfsic_process_superblock_dev_mirror(
675 state
, dev_state
, device
, i
,
676 &selected_dev_state
, selected_super
);
677 if (0 != ret
&& 0 == i
) {
678 kfree(selected_super
);
684 if (NULL
== state
->latest_superblock
) {
685 printk(KERN_INFO
"btrfsic: no superblock found!\n");
686 kfree(selected_super
);
690 state
->csum_size
= btrfs_super_csum_size(selected_super
);
692 for (pass
= 0; pass
< 3; pass
++) {
699 next_bytenr
= btrfs_super_root(selected_super
);
700 if (state
->print_mask
&
701 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION
)
702 printk(KERN_INFO
"root@%llu\n",
703 (unsigned long long)next_bytenr
);
706 next_bytenr
= btrfs_super_chunk_root(selected_super
);
707 if (state
->print_mask
&
708 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION
)
709 printk(KERN_INFO
"chunk@%llu\n",
710 (unsigned long long)next_bytenr
);
713 next_bytenr
= btrfs_super_log_root(selected_super
);
714 if (0 == next_bytenr
)
716 if (state
->print_mask
&
717 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION
)
718 printk(KERN_INFO
"log@%llu\n",
719 (unsigned long long)next_bytenr
);
724 btrfs_num_copies(&state
->root
->fs_info
->mapping_tree
,
725 next_bytenr
, state
->metablock_size
);
726 if (state
->print_mask
& BTRFSIC_PRINT_MASK_NUM_COPIES
)
727 printk(KERN_INFO
"num_copies(log_bytenr=%llu) = %d\n",
728 (unsigned long long)next_bytenr
, num_copies
);
730 for (mirror_num
= 1; mirror_num
<= num_copies
; mirror_num
++) {
731 struct btrfsic_block
*next_block
;
732 struct btrfsic_block_data_ctx tmp_next_block_ctx
;
733 struct btrfsic_block_link
*l
;
735 ret
= btrfsic_map_block(state
, next_bytenr
,
736 state
->metablock_size
,
740 printk(KERN_INFO
"btrfsic:"
741 " btrfsic_map_block(root @%llu,"
742 " mirror %d) failed!\n",
743 (unsigned long long)next_bytenr
,
745 kfree(selected_super
);
749 next_block
= btrfsic_block_hashtable_lookup(
750 tmp_next_block_ctx
.dev
->bdev
,
751 tmp_next_block_ctx
.dev_bytenr
,
752 &state
->block_hashtable
);
753 BUG_ON(NULL
== next_block
);
755 l
= btrfsic_block_link_hashtable_lookup(
756 tmp_next_block_ctx
.dev
->bdev
,
757 tmp_next_block_ctx
.dev_bytenr
,
758 state
->latest_superblock
->dev_state
->
760 state
->latest_superblock
->dev_bytenr
,
761 &state
->block_link_hashtable
);
764 ret
= btrfsic_read_block(state
, &tmp_next_block_ctx
);
765 if (ret
< (int)PAGE_CACHE_SIZE
) {
767 "btrfsic: read @logical %llu failed!\n",
769 tmp_next_block_ctx
.start
);
770 btrfsic_release_block_ctx(&tmp_next_block_ctx
);
771 kfree(selected_super
);
775 ret
= btrfsic_process_metablock(state
,
778 BTRFS_MAX_LEVEL
+ 3, 1);
779 btrfsic_release_block_ctx(&tmp_next_block_ctx
);
783 kfree(selected_super
);
787 static int btrfsic_process_superblock_dev_mirror(
788 struct btrfsic_state
*state
,
789 struct btrfsic_dev_state
*dev_state
,
790 struct btrfs_device
*device
,
791 int superblock_mirror_num
,
792 struct btrfsic_dev_state
**selected_dev_state
,
793 struct btrfs_super_block
*selected_super
)
795 struct btrfs_super_block
*super_tmp
;
797 struct buffer_head
*bh
;
798 struct btrfsic_block
*superblock_tmp
;
800 struct block_device
*const superblock_bdev
= device
->bdev
;
802 /* super block bytenr is always the unmapped device bytenr */
803 dev_bytenr
= btrfs_sb_offset(superblock_mirror_num
);
804 if (dev_bytenr
+ BTRFS_SUPER_INFO_SIZE
> device
->total_bytes
)
806 bh
= __bread(superblock_bdev
, dev_bytenr
/ 4096,
807 BTRFS_SUPER_INFO_SIZE
);
810 super_tmp
= (struct btrfs_super_block
*)
811 (bh
->b_data
+ (dev_bytenr
& 4095));
813 if (btrfs_super_bytenr(super_tmp
) != dev_bytenr
||
814 strncmp((char *)(&(super_tmp
->magic
)), BTRFS_MAGIC
,
815 sizeof(super_tmp
->magic
)) ||
816 memcmp(device
->uuid
, super_tmp
->dev_item
.uuid
, BTRFS_UUID_SIZE
) ||
817 btrfs_super_nodesize(super_tmp
) != state
->metablock_size
||
818 btrfs_super_leafsize(super_tmp
) != state
->metablock_size
||
819 btrfs_super_sectorsize(super_tmp
) != state
->datablock_size
) {
825 btrfsic_block_hashtable_lookup(superblock_bdev
,
827 &state
->block_hashtable
);
828 if (NULL
== superblock_tmp
) {
829 superblock_tmp
= btrfsic_block_alloc();
830 if (NULL
== superblock_tmp
) {
831 printk(KERN_INFO
"btrfsic: error, kmalloc failed!\n");
835 /* for superblock, only the dev_bytenr makes sense */
836 superblock_tmp
->dev_bytenr
= dev_bytenr
;
837 superblock_tmp
->dev_state
= dev_state
;
838 superblock_tmp
->logical_bytenr
= dev_bytenr
;
839 superblock_tmp
->generation
= btrfs_super_generation(super_tmp
);
840 superblock_tmp
->is_metadata
= 1;
841 superblock_tmp
->is_superblock
= 1;
842 superblock_tmp
->is_iodone
= 1;
843 superblock_tmp
->never_written
= 0;
844 superblock_tmp
->mirror_num
= 1 + superblock_mirror_num
;
845 if (state
->print_mask
& BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE
)
846 printk(KERN_INFO
"New initial S-block (bdev %p, %s)"
847 " @%llu (%s/%llu/%d)\n",
848 superblock_bdev
, device
->name
,
849 (unsigned long long)dev_bytenr
,
851 (unsigned long long)dev_bytenr
,
852 superblock_mirror_num
);
853 list_add(&superblock_tmp
->all_blocks_node
,
854 &state
->all_blocks_list
);
855 btrfsic_block_hashtable_add(superblock_tmp
,
856 &state
->block_hashtable
);
859 /* select the one with the highest generation field */
860 if (btrfs_super_generation(super_tmp
) >
861 state
->max_superblock_generation
||
862 0 == state
->max_superblock_generation
) {
863 memcpy(selected_super
, super_tmp
, sizeof(*selected_super
));
864 *selected_dev_state
= dev_state
;
865 state
->max_superblock_generation
=
866 btrfs_super_generation(super_tmp
);
867 state
->latest_superblock
= superblock_tmp
;
870 for (pass
= 0; pass
< 3; pass
++) {
874 const char *additional_string
= NULL
;
875 struct btrfs_disk_key tmp_disk_key
;
877 tmp_disk_key
.type
= BTRFS_ROOT_ITEM_KEY
;
878 tmp_disk_key
.offset
= 0;
881 tmp_disk_key
.objectid
=
882 cpu_to_le64(BTRFS_ROOT_TREE_OBJECTID
);
883 additional_string
= "initial root ";
884 next_bytenr
= btrfs_super_root(super_tmp
);
887 tmp_disk_key
.objectid
=
888 cpu_to_le64(BTRFS_CHUNK_TREE_OBJECTID
);
889 additional_string
= "initial chunk ";
890 next_bytenr
= btrfs_super_chunk_root(super_tmp
);
893 tmp_disk_key
.objectid
=
894 cpu_to_le64(BTRFS_TREE_LOG_OBJECTID
);
895 additional_string
= "initial log ";
896 next_bytenr
= btrfs_super_log_root(super_tmp
);
897 if (0 == next_bytenr
)
903 btrfs_num_copies(&state
->root
->fs_info
->mapping_tree
,
904 next_bytenr
, state
->metablock_size
);
905 if (state
->print_mask
& BTRFSIC_PRINT_MASK_NUM_COPIES
)
906 printk(KERN_INFO
"num_copies(log_bytenr=%llu) = %d\n",
907 (unsigned long long)next_bytenr
, num_copies
);
908 for (mirror_num
= 1; mirror_num
<= num_copies
; mirror_num
++) {
909 struct btrfsic_block
*next_block
;
910 struct btrfsic_block_data_ctx tmp_next_block_ctx
;
911 struct btrfsic_block_link
*l
;
913 if (btrfsic_map_block(state
, next_bytenr
,
914 state
->metablock_size
,
917 printk(KERN_INFO
"btrfsic: btrfsic_map_block("
918 "bytenr @%llu, mirror %d) failed!\n",
919 (unsigned long long)next_bytenr
,
925 next_block
= btrfsic_block_lookup_or_add(
926 state
, &tmp_next_block_ctx
,
927 additional_string
, 1, 1, 0,
929 if (NULL
== next_block
) {
930 btrfsic_release_block_ctx(&tmp_next_block_ctx
);
935 next_block
->disk_key
= tmp_disk_key
;
936 next_block
->generation
= BTRFSIC_GENERATION_UNKNOWN
;
937 l
= btrfsic_block_link_lookup_or_add(
938 state
, &tmp_next_block_ctx
,
939 next_block
, superblock_tmp
,
940 BTRFSIC_GENERATION_UNKNOWN
);
941 btrfsic_release_block_ctx(&tmp_next_block_ctx
);
948 if (state
->print_mask
& BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES
)
949 btrfsic_dump_tree_sub(state
, superblock_tmp
, 0);
955 static struct btrfsic_stack_frame
*btrfsic_stack_frame_alloc(void)
957 struct btrfsic_stack_frame
*sf
;
959 sf
= kzalloc(sizeof(*sf
), GFP_NOFS
);
961 printk(KERN_INFO
"btrfsic: alloc memory failed!\n");
963 sf
->magic
= BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER
;
967 static void btrfsic_stack_frame_free(struct btrfsic_stack_frame
*sf
)
969 BUG_ON(!(NULL
== sf
||
970 BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER
== sf
->magic
));
974 static int btrfsic_process_metablock(
975 struct btrfsic_state
*state
,
976 struct btrfsic_block
*const first_block
,
977 struct btrfsic_block_data_ctx
*const first_block_ctx
,
978 int first_limit_nesting
, int force_iodone_flag
)
980 struct btrfsic_stack_frame initial_stack_frame
= { 0 };
981 struct btrfsic_stack_frame
*sf
;
982 struct btrfsic_stack_frame
*next_stack
;
983 struct btrfs_header
*const first_hdr
=
984 (struct btrfs_header
*)first_block_ctx
->datav
[0];
987 sf
= &initial_stack_frame
;
990 sf
->limit_nesting
= first_limit_nesting
;
991 sf
->block
= first_block
;
992 sf
->block_ctx
= first_block_ctx
;
993 sf
->next_block
= NULL
;
997 continue_with_new_stack_frame
:
998 sf
->block
->generation
= le64_to_cpu(sf
->hdr
->generation
);
999 if (0 == sf
->hdr
->level
) {
1000 struct btrfs_leaf
*const leafhdr
=
1001 (struct btrfs_leaf
*)sf
->hdr
;
1004 sf
->nr
= le32_to_cpu(leafhdr
->header
.nritems
);
1006 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
1008 "leaf %llu items %d generation %llu"
1010 (unsigned long long)
1011 sf
->block_ctx
->start
,
1013 (unsigned long long)
1014 le64_to_cpu(leafhdr
->header
.generation
),
1015 (unsigned long long)
1016 le64_to_cpu(leafhdr
->header
.owner
));
1019 continue_with_current_leaf_stack_frame
:
1020 if (0 == sf
->num_copies
|| sf
->mirror_num
> sf
->num_copies
) {
1025 if (sf
->i
< sf
->nr
) {
1026 struct btrfs_item disk_item
;
1027 u32 disk_item_offset
=
1028 (uintptr_t)(leafhdr
->items
+ sf
->i
) -
1030 struct btrfs_disk_key
*disk_key
;
1034 if (disk_item_offset
+ sizeof(struct btrfs_item
) >
1035 sf
->block_ctx
->len
) {
1036 leaf_item_out_of_bounce_error
:
1038 "btrfsic: leaf item out of bounce at logical %llu, dev %s\n",
1039 sf
->block_ctx
->start
,
1040 sf
->block_ctx
->dev
->name
);
1041 goto one_stack_frame_backwards
;
1043 btrfsic_read_from_block_data(sf
->block_ctx
,
1046 sizeof(struct btrfs_item
));
1047 item_offset
= le32_to_cpu(disk_item
.offset
);
1048 disk_key
= &disk_item
.key
;
1049 type
= disk_key
->type
;
1051 if (BTRFS_ROOT_ITEM_KEY
== type
) {
1052 struct btrfs_root_item root_item
;
1053 u32 root_item_offset
;
1056 root_item_offset
= item_offset
+
1057 offsetof(struct btrfs_leaf
, items
);
1058 if (root_item_offset
+
1059 sizeof(struct btrfs_root_item
) >
1061 goto leaf_item_out_of_bounce_error
;
1062 btrfsic_read_from_block_data(
1063 sf
->block_ctx
, &root_item
,
1065 sizeof(struct btrfs_root_item
));
1066 next_bytenr
= le64_to_cpu(root_item
.bytenr
);
1069 btrfsic_create_link_to_next_block(
1075 &sf
->next_block_ctx
,
1081 le64_to_cpu(root_item
.
1084 goto one_stack_frame_backwards
;
1086 if (NULL
!= sf
->next_block
) {
1087 struct btrfs_header
*const next_hdr
=
1088 (struct btrfs_header
*)
1089 sf
->next_block_ctx
.datav
[0];
1092 btrfsic_stack_frame_alloc();
1093 if (NULL
== next_stack
) {
1094 btrfsic_release_block_ctx(
1097 goto one_stack_frame_backwards
;
1101 next_stack
->block
= sf
->next_block
;
1102 next_stack
->block_ctx
=
1103 &sf
->next_block_ctx
;
1104 next_stack
->next_block
= NULL
;
1105 next_stack
->hdr
= next_hdr
;
1106 next_stack
->limit_nesting
=
1107 sf
->limit_nesting
- 1;
1108 next_stack
->prev
= sf
;
1110 goto continue_with_new_stack_frame
;
1112 } else if (BTRFS_EXTENT_DATA_KEY
== type
&&
1113 state
->include_extent_data
) {
1114 sf
->error
= btrfsic_handle_extent_data(
1121 goto one_stack_frame_backwards
;
1124 goto continue_with_current_leaf_stack_frame
;
1127 struct btrfs_node
*const nodehdr
= (struct btrfs_node
*)sf
->hdr
;
1130 sf
->nr
= le32_to_cpu(nodehdr
->header
.nritems
);
1132 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
1133 printk(KERN_INFO
"node %llu level %d items %d"
1134 " generation %llu owner %llu\n",
1135 (unsigned long long)
1136 sf
->block_ctx
->start
,
1137 nodehdr
->header
.level
, sf
->nr
,
1138 (unsigned long long)
1139 le64_to_cpu(nodehdr
->header
.generation
),
1140 (unsigned long long)
1141 le64_to_cpu(nodehdr
->header
.owner
));
1144 continue_with_current_node_stack_frame
:
1145 if (0 == sf
->num_copies
|| sf
->mirror_num
> sf
->num_copies
) {
1150 if (sf
->i
< sf
->nr
) {
1151 struct btrfs_key_ptr key_ptr
;
1155 key_ptr_offset
= (uintptr_t)(nodehdr
->ptrs
+ sf
->i
) -
1157 if (key_ptr_offset
+ sizeof(struct btrfs_key_ptr
) >
1158 sf
->block_ctx
->len
) {
1160 "btrfsic: node item out of bounce at logical %llu, dev %s\n",
1161 sf
->block_ctx
->start
,
1162 sf
->block_ctx
->dev
->name
);
1163 goto one_stack_frame_backwards
;
1165 btrfsic_read_from_block_data(
1166 sf
->block_ctx
, &key_ptr
, key_ptr_offset
,
1167 sizeof(struct btrfs_key_ptr
));
1168 next_bytenr
= le64_to_cpu(key_ptr
.blockptr
);
1170 sf
->error
= btrfsic_create_link_to_next_block(
1176 &sf
->next_block_ctx
,
1182 le64_to_cpu(key_ptr
.generation
));
1184 goto one_stack_frame_backwards
;
1186 if (NULL
!= sf
->next_block
) {
1187 struct btrfs_header
*const next_hdr
=
1188 (struct btrfs_header
*)
1189 sf
->next_block_ctx
.datav
[0];
1191 next_stack
= btrfsic_stack_frame_alloc();
1192 if (NULL
== next_stack
)
1193 goto one_stack_frame_backwards
;
1196 next_stack
->block
= sf
->next_block
;
1197 next_stack
->block_ctx
= &sf
->next_block_ctx
;
1198 next_stack
->next_block
= NULL
;
1199 next_stack
->hdr
= next_hdr
;
1200 next_stack
->limit_nesting
=
1201 sf
->limit_nesting
- 1;
1202 next_stack
->prev
= sf
;
1204 goto continue_with_new_stack_frame
;
1207 goto continue_with_current_node_stack_frame
;
1211 one_stack_frame_backwards
:
1212 if (NULL
!= sf
->prev
) {
1213 struct btrfsic_stack_frame
*const prev
= sf
->prev
;
1215 /* the one for the initial block is freed in the caller */
1216 btrfsic_release_block_ctx(sf
->block_ctx
);
1219 prev
->error
= sf
->error
;
1220 btrfsic_stack_frame_free(sf
);
1222 goto one_stack_frame_backwards
;
1225 btrfsic_stack_frame_free(sf
);
1227 goto continue_with_new_stack_frame
;
1229 BUG_ON(&initial_stack_frame
!= sf
);
1235 static void btrfsic_read_from_block_data(
1236 struct btrfsic_block_data_ctx
*block_ctx
,
1237 void *dstv
, u32 offset
, size_t len
)
1240 size_t offset_in_page
;
1242 char *dst
= (char *)dstv
;
1243 size_t start_offset
= block_ctx
->start
& ((u64
)PAGE_CACHE_SIZE
- 1);
1244 unsigned long i
= (start_offset
+ offset
) >> PAGE_CACHE_SHIFT
;
1246 WARN_ON(offset
+ len
> block_ctx
->len
);
1247 offset_in_page
= (start_offset
+ offset
) &
1248 ((unsigned long)PAGE_CACHE_SIZE
- 1);
1251 cur
= min(len
, ((size_t)PAGE_CACHE_SIZE
- offset_in_page
));
1252 BUG_ON(i
>= (block_ctx
->len
+ PAGE_CACHE_SIZE
- 1) >>
1254 kaddr
= block_ctx
->datav
[i
];
1255 memcpy(dst
, kaddr
+ offset_in_page
, cur
);
1264 static int btrfsic_create_link_to_next_block(
1265 struct btrfsic_state
*state
,
1266 struct btrfsic_block
*block
,
1267 struct btrfsic_block_data_ctx
*block_ctx
,
1270 struct btrfsic_block_data_ctx
*next_block_ctx
,
1271 struct btrfsic_block
**next_blockp
,
1272 int force_iodone_flag
,
1273 int *num_copiesp
, int *mirror_nump
,
1274 struct btrfs_disk_key
*disk_key
,
1275 u64 parent_generation
)
1277 struct btrfsic_block
*next_block
= NULL
;
1279 struct btrfsic_block_link
*l
;
1280 int did_alloc_block_link
;
1281 int block_was_created
;
1283 *next_blockp
= NULL
;
1284 if (0 == *num_copiesp
) {
1286 btrfs_num_copies(&state
->root
->fs_info
->mapping_tree
,
1287 next_bytenr
, state
->metablock_size
);
1288 if (state
->print_mask
& BTRFSIC_PRINT_MASK_NUM_COPIES
)
1289 printk(KERN_INFO
"num_copies(log_bytenr=%llu) = %d\n",
1290 (unsigned long long)next_bytenr
, *num_copiesp
);
1294 if (*mirror_nump
> *num_copiesp
)
1297 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
1299 "btrfsic_create_link_to_next_block(mirror_num=%d)\n",
1301 ret
= btrfsic_map_block(state
, next_bytenr
,
1302 state
->metablock_size
,
1303 next_block_ctx
, *mirror_nump
);
1306 "btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
1307 (unsigned long long)next_bytenr
, *mirror_nump
);
1308 btrfsic_release_block_ctx(next_block_ctx
);
1309 *next_blockp
= NULL
;
1313 next_block
= btrfsic_block_lookup_or_add(state
,
1314 next_block_ctx
, "referenced ",
1315 1, force_iodone_flag
,
1318 &block_was_created
);
1319 if (NULL
== next_block
) {
1320 btrfsic_release_block_ctx(next_block_ctx
);
1321 *next_blockp
= NULL
;
1324 if (block_was_created
) {
1326 next_block
->generation
= BTRFSIC_GENERATION_UNKNOWN
;
1328 if (next_block
->logical_bytenr
!= next_bytenr
&&
1329 !(!next_block
->is_metadata
&&
1330 0 == next_block
->logical_bytenr
)) {
1332 "Referenced block @%llu (%s/%llu/%d)"
1333 " found in hash table, %c,"
1334 " bytenr mismatch (!= stored %llu).\n",
1335 (unsigned long long)next_bytenr
,
1336 next_block_ctx
->dev
->name
,
1337 (unsigned long long)next_block_ctx
->dev_bytenr
,
1339 btrfsic_get_block_type(state
, next_block
),
1340 (unsigned long long)next_block
->logical_bytenr
);
1341 } else if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
1343 "Referenced block @%llu (%s/%llu/%d)"
1344 " found in hash table, %c.\n",
1345 (unsigned long long)next_bytenr
,
1346 next_block_ctx
->dev
->name
,
1347 (unsigned long long)next_block_ctx
->dev_bytenr
,
1349 btrfsic_get_block_type(state
, next_block
));
1350 next_block
->logical_bytenr
= next_bytenr
;
1352 next_block
->mirror_num
= *mirror_nump
;
1353 l
= btrfsic_block_link_hashtable_lookup(
1354 next_block_ctx
->dev
->bdev
,
1355 next_block_ctx
->dev_bytenr
,
1356 block_ctx
->dev
->bdev
,
1357 block_ctx
->dev_bytenr
,
1358 &state
->block_link_hashtable
);
1361 next_block
->disk_key
= *disk_key
;
1363 l
= btrfsic_block_link_alloc();
1365 printk(KERN_INFO
"btrfsic: error, kmalloc failed!\n");
1366 btrfsic_release_block_ctx(next_block_ctx
);
1367 *next_blockp
= NULL
;
1371 did_alloc_block_link
= 1;
1372 l
->block_ref_to
= next_block
;
1373 l
->block_ref_from
= block
;
1375 l
->parent_generation
= parent_generation
;
1377 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
1378 btrfsic_print_add_link(state
, l
);
1380 list_add(&l
->node_ref_to
, &block
->ref_to_list
);
1381 list_add(&l
->node_ref_from
, &next_block
->ref_from_list
);
1383 btrfsic_block_link_hashtable_add(l
,
1384 &state
->block_link_hashtable
);
1386 did_alloc_block_link
= 0;
1387 if (0 == limit_nesting
) {
1389 l
->parent_generation
= parent_generation
;
1390 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
1391 btrfsic_print_add_link(state
, l
);
1395 if (limit_nesting
> 0 && did_alloc_block_link
) {
1396 ret
= btrfsic_read_block(state
, next_block_ctx
);
1397 if (ret
< (int)next_block_ctx
->len
) {
1399 "btrfsic: read block @logical %llu failed!\n",
1400 (unsigned long long)next_bytenr
);
1401 btrfsic_release_block_ctx(next_block_ctx
);
1402 *next_blockp
= NULL
;
1406 *next_blockp
= next_block
;
1408 *next_blockp
= NULL
;
1415 static int btrfsic_handle_extent_data(
1416 struct btrfsic_state
*state
,
1417 struct btrfsic_block
*block
,
1418 struct btrfsic_block_data_ctx
*block_ctx
,
1419 u32 item_offset
, int force_iodone_flag
)
1422 struct btrfs_file_extent_item file_extent_item
;
1423 u64 file_extent_item_offset
;
1427 struct btrfsic_block_link
*l
;
1429 file_extent_item_offset
= offsetof(struct btrfs_leaf
, items
) +
1431 if (file_extent_item_offset
+
1432 offsetof(struct btrfs_file_extent_item
, disk_num_bytes
) >
1435 "btrfsic: file item out of bounce at logical %llu, dev %s\n",
1436 block_ctx
->start
, block_ctx
->dev
->name
);
1440 btrfsic_read_from_block_data(block_ctx
, &file_extent_item
,
1441 file_extent_item_offset
,
1442 offsetof(struct btrfs_file_extent_item
, disk_num_bytes
));
1443 if (BTRFS_FILE_EXTENT_REG
!= file_extent_item
.type
||
1444 ((u64
)0) == le64_to_cpu(file_extent_item
.disk_bytenr
)) {
1445 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERY_VERBOSE
)
1446 printk(KERN_INFO
"extent_data: type %u, disk_bytenr = %llu\n",
1447 file_extent_item
.type
,
1448 (unsigned long long)
1449 le64_to_cpu(file_extent_item
.disk_bytenr
));
1453 if (file_extent_item_offset
+ sizeof(struct btrfs_file_extent_item
) >
1456 "btrfsic: file item out of bounce at logical %llu, dev %s\n",
1457 block_ctx
->start
, block_ctx
->dev
->name
);
1460 btrfsic_read_from_block_data(block_ctx
, &file_extent_item
,
1461 file_extent_item_offset
,
1462 sizeof(struct btrfs_file_extent_item
));
1463 next_bytenr
= le64_to_cpu(file_extent_item
.disk_bytenr
) +
1464 le64_to_cpu(file_extent_item
.offset
);
1465 generation
= le64_to_cpu(file_extent_item
.generation
);
1466 num_bytes
= le64_to_cpu(file_extent_item
.num_bytes
);
1467 generation
= le64_to_cpu(file_extent_item
.generation
);
1469 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERY_VERBOSE
)
1470 printk(KERN_INFO
"extent_data: type %u, disk_bytenr = %llu,"
1471 " offset = %llu, num_bytes = %llu\n",
1472 file_extent_item
.type
,
1473 (unsigned long long)
1474 le64_to_cpu(file_extent_item
.disk_bytenr
),
1475 (unsigned long long)le64_to_cpu(file_extent_item
.offset
),
1476 (unsigned long long)num_bytes
);
1477 while (num_bytes
> 0) {
1482 if (num_bytes
> state
->datablock_size
)
1483 chunk_len
= state
->datablock_size
;
1485 chunk_len
= num_bytes
;
1488 btrfs_num_copies(&state
->root
->fs_info
->mapping_tree
,
1489 next_bytenr
, state
->datablock_size
);
1490 if (state
->print_mask
& BTRFSIC_PRINT_MASK_NUM_COPIES
)
1491 printk(KERN_INFO
"num_copies(log_bytenr=%llu) = %d\n",
1492 (unsigned long long)next_bytenr
, num_copies
);
1493 for (mirror_num
= 1; mirror_num
<= num_copies
; mirror_num
++) {
1494 struct btrfsic_block_data_ctx next_block_ctx
;
1495 struct btrfsic_block
*next_block
;
1496 int block_was_created
;
1498 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
1499 printk(KERN_INFO
"btrfsic_handle_extent_data("
1500 "mirror_num=%d)\n", mirror_num
);
1501 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERY_VERBOSE
)
1503 "\tdisk_bytenr = %llu, num_bytes %u\n",
1504 (unsigned long long)next_bytenr
,
1506 ret
= btrfsic_map_block(state
, next_bytenr
,
1507 chunk_len
, &next_block_ctx
,
1511 "btrfsic: btrfsic_map_block(@%llu,"
1512 " mirror=%d) failed!\n",
1513 (unsigned long long)next_bytenr
,
1518 next_block
= btrfsic_block_lookup_or_add(
1526 &block_was_created
);
1527 if (NULL
== next_block
) {
1529 "btrfsic: error, kmalloc failed!\n");
1530 btrfsic_release_block_ctx(&next_block_ctx
);
1533 if (!block_was_created
) {
1534 if (next_block
->logical_bytenr
!= next_bytenr
&&
1535 !(!next_block
->is_metadata
&&
1536 0 == next_block
->logical_bytenr
)) {
1539 " @%llu (%s/%llu/%d)"
1540 " found in hash table, D,"
1542 " (!= stored %llu).\n",
1543 (unsigned long long)next_bytenr
,
1544 next_block_ctx
.dev
->name
,
1545 (unsigned long long)
1546 next_block_ctx
.dev_bytenr
,
1548 (unsigned long long)
1549 next_block
->logical_bytenr
);
1551 next_block
->logical_bytenr
= next_bytenr
;
1552 next_block
->mirror_num
= mirror_num
;
1555 l
= btrfsic_block_link_lookup_or_add(state
,
1559 btrfsic_release_block_ctx(&next_block_ctx
);
1564 next_bytenr
+= chunk_len
;
1565 num_bytes
-= chunk_len
;
1571 static int btrfsic_map_block(struct btrfsic_state
*state
, u64 bytenr
, u32 len
,
1572 struct btrfsic_block_data_ctx
*block_ctx_out
,
1577 struct btrfs_bio
*multi
= NULL
;
1578 struct btrfs_device
*device
;
1581 ret
= btrfs_map_block(&state
->root
->fs_info
->mapping_tree
, READ
,
1582 bytenr
, &length
, &multi
, mirror_num
);
1584 device
= multi
->stripes
[0].dev
;
1585 block_ctx_out
->dev
= btrfsic_dev_state_lookup(device
->bdev
);
1586 block_ctx_out
->dev_bytenr
= multi
->stripes
[0].physical
;
1587 block_ctx_out
->start
= bytenr
;
1588 block_ctx_out
->len
= len
;
1589 block_ctx_out
->datav
= NULL
;
1590 block_ctx_out
->pagev
= NULL
;
1591 block_ctx_out
->mem_to_free
= NULL
;
1595 if (NULL
== block_ctx_out
->dev
) {
1597 printk(KERN_INFO
"btrfsic: error, cannot lookup dev (#1)!\n");
1603 static int btrfsic_map_superblock(struct btrfsic_state
*state
, u64 bytenr
,
1604 u32 len
, struct block_device
*bdev
,
1605 struct btrfsic_block_data_ctx
*block_ctx_out
)
1607 block_ctx_out
->dev
= btrfsic_dev_state_lookup(bdev
);
1608 block_ctx_out
->dev_bytenr
= bytenr
;
1609 block_ctx_out
->start
= bytenr
;
1610 block_ctx_out
->len
= len
;
1611 block_ctx_out
->datav
= NULL
;
1612 block_ctx_out
->pagev
= NULL
;
1613 block_ctx_out
->mem_to_free
= NULL
;
1614 if (NULL
!= block_ctx_out
->dev
) {
1617 printk(KERN_INFO
"btrfsic: error, cannot lookup dev (#2)!\n");
1622 static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx
*block_ctx
)
1624 if (block_ctx
->mem_to_free
) {
1625 unsigned int num_pages
;
1627 BUG_ON(!block_ctx
->datav
);
1628 BUG_ON(!block_ctx
->pagev
);
1629 num_pages
= (block_ctx
->len
+ (u64
)PAGE_CACHE_SIZE
- 1) >>
1631 while (num_pages
> 0) {
1633 if (block_ctx
->datav
[num_pages
]) {
1634 kunmap(block_ctx
->pagev
[num_pages
]);
1635 block_ctx
->datav
[num_pages
] = NULL
;
1637 if (block_ctx
->pagev
[num_pages
]) {
1638 __free_page(block_ctx
->pagev
[num_pages
]);
1639 block_ctx
->pagev
[num_pages
] = NULL
;
1643 kfree(block_ctx
->mem_to_free
);
1644 block_ctx
->mem_to_free
= NULL
;
1645 block_ctx
->pagev
= NULL
;
1646 block_ctx
->datav
= NULL
;
1650 static int btrfsic_read_block(struct btrfsic_state
*state
,
1651 struct btrfsic_block_data_ctx
*block_ctx
)
1653 unsigned int num_pages
;
1658 BUG_ON(block_ctx
->datav
);
1659 BUG_ON(block_ctx
->pagev
);
1660 BUG_ON(block_ctx
->mem_to_free
);
1661 if (block_ctx
->dev_bytenr
& ((u64
)PAGE_CACHE_SIZE
- 1)) {
1663 "btrfsic: read_block() with unaligned bytenr %llu\n",
1664 (unsigned long long)block_ctx
->dev_bytenr
);
1668 num_pages
= (block_ctx
->len
+ (u64
)PAGE_CACHE_SIZE
- 1) >>
1670 block_ctx
->mem_to_free
= kzalloc((sizeof(*block_ctx
->datav
) +
1671 sizeof(*block_ctx
->pagev
)) *
1672 num_pages
, GFP_NOFS
);
1673 if (!block_ctx
->mem_to_free
)
1675 block_ctx
->datav
= block_ctx
->mem_to_free
;
1676 block_ctx
->pagev
= (struct page
**)(block_ctx
->datav
+ num_pages
);
1677 for (i
= 0; i
< num_pages
; i
++) {
1678 block_ctx
->pagev
[i
] = alloc_page(GFP_NOFS
);
1679 if (!block_ctx
->pagev
[i
])
1683 dev_bytenr
= block_ctx
->dev_bytenr
;
1684 for (i
= 0; i
< num_pages
;) {
1687 DECLARE_COMPLETION_ONSTACK(complete
);
1689 bio
= bio_alloc(GFP_NOFS
, num_pages
- i
);
1692 "btrfsic: bio_alloc() for %u pages failed!\n",
1696 bio
->bi_bdev
= block_ctx
->dev
->bdev
;
1697 bio
->bi_sector
= dev_bytenr
>> 9;
1698 bio
->bi_end_io
= btrfsic_complete_bio_end_io
;
1699 bio
->bi_private
= &complete
;
1701 for (j
= i
; j
< num_pages
; j
++) {
1702 ret
= bio_add_page(bio
, block_ctx
->pagev
[j
],
1703 PAGE_CACHE_SIZE
, 0);
1704 if (PAGE_CACHE_SIZE
!= ret
)
1709 "btrfsic: error, failed to add a single page!\n");
1712 submit_bio(READ
, bio
);
1714 /* this will also unplug the queue */
1715 wait_for_completion(&complete
);
1717 if (!test_bit(BIO_UPTODATE
, &bio
->bi_flags
)) {
1719 "btrfsic: read error at logical %llu dev %s!\n",
1720 block_ctx
->start
, block_ctx
->dev
->name
);
1725 dev_bytenr
+= (j
- i
) * PAGE_CACHE_SIZE
;
1728 for (i
= 0; i
< num_pages
; i
++) {
1729 block_ctx
->datav
[i
] = kmap(block_ctx
->pagev
[i
]);
1730 if (!block_ctx
->datav
[i
]) {
1731 printk(KERN_INFO
"btrfsic: kmap() failed (dev %s)!\n",
1732 block_ctx
->dev
->name
);
1737 return block_ctx
->len
;
1740 static void btrfsic_complete_bio_end_io(struct bio
*bio
, int err
)
1742 complete((struct completion
*)bio
->bi_private
);
1745 static void btrfsic_dump_database(struct btrfsic_state
*state
)
1747 struct list_head
*elem_all
;
1749 BUG_ON(NULL
== state
);
1751 printk(KERN_INFO
"all_blocks_list:\n");
1752 list_for_each(elem_all
, &state
->all_blocks_list
) {
1753 const struct btrfsic_block
*const b_all
=
1754 list_entry(elem_all
, struct btrfsic_block
,
1756 struct list_head
*elem_ref_to
;
1757 struct list_head
*elem_ref_from
;
1759 printk(KERN_INFO
"%c-block @%llu (%s/%llu/%d)\n",
1760 btrfsic_get_block_type(state
, b_all
),
1761 (unsigned long long)b_all
->logical_bytenr
,
1762 b_all
->dev_state
->name
,
1763 (unsigned long long)b_all
->dev_bytenr
,
1766 list_for_each(elem_ref_to
, &b_all
->ref_to_list
) {
1767 const struct btrfsic_block_link
*const l
=
1768 list_entry(elem_ref_to
,
1769 struct btrfsic_block_link
,
1772 printk(KERN_INFO
" %c @%llu (%s/%llu/%d)"
1774 " %c @%llu (%s/%llu/%d)\n",
1775 btrfsic_get_block_type(state
, b_all
),
1776 (unsigned long long)b_all
->logical_bytenr
,
1777 b_all
->dev_state
->name
,
1778 (unsigned long long)b_all
->dev_bytenr
,
1781 btrfsic_get_block_type(state
, l
->block_ref_to
),
1782 (unsigned long long)
1783 l
->block_ref_to
->logical_bytenr
,
1784 l
->block_ref_to
->dev_state
->name
,
1785 (unsigned long long)l
->block_ref_to
->dev_bytenr
,
1786 l
->block_ref_to
->mirror_num
);
1789 list_for_each(elem_ref_from
, &b_all
->ref_from_list
) {
1790 const struct btrfsic_block_link
*const l
=
1791 list_entry(elem_ref_from
,
1792 struct btrfsic_block_link
,
1795 printk(KERN_INFO
" %c @%llu (%s/%llu/%d)"
1797 " %c @%llu (%s/%llu/%d)\n",
1798 btrfsic_get_block_type(state
, b_all
),
1799 (unsigned long long)b_all
->logical_bytenr
,
1800 b_all
->dev_state
->name
,
1801 (unsigned long long)b_all
->dev_bytenr
,
1804 btrfsic_get_block_type(state
, l
->block_ref_from
),
1805 (unsigned long long)
1806 l
->block_ref_from
->logical_bytenr
,
1807 l
->block_ref_from
->dev_state
->name
,
1808 (unsigned long long)
1809 l
->block_ref_from
->dev_bytenr
,
1810 l
->block_ref_from
->mirror_num
);
1813 printk(KERN_INFO
"\n");
1818 * Test whether the disk block contains a tree block (leaf or node)
1819 * (note that this test fails for the super block)
1821 static int btrfsic_test_for_metadata(struct btrfsic_state
*state
,
1822 char **datav
, unsigned int num_pages
)
1824 struct btrfs_header
*h
;
1825 u8 csum
[BTRFS_CSUM_SIZE
];
1829 if (num_pages
* PAGE_CACHE_SIZE
< state
->metablock_size
)
1830 return 1; /* not metadata */
1831 num_pages
= state
->metablock_size
>> PAGE_CACHE_SHIFT
;
1832 h
= (struct btrfs_header
*)datav
[0];
1834 if (memcmp(h
->fsid
, state
->root
->fs_info
->fsid
, BTRFS_UUID_SIZE
))
1837 for (i
= 0; i
< num_pages
; i
++) {
1838 u8
*data
= i
? datav
[i
] : (datav
[i
] + BTRFS_CSUM_SIZE
);
1839 size_t sublen
= i
? PAGE_CACHE_SIZE
:
1840 (PAGE_CACHE_SIZE
- BTRFS_CSUM_SIZE
);
1842 crc
= crc32c(crc
, data
, sublen
);
1844 btrfs_csum_final(crc
, csum
);
1845 if (memcmp(csum
, h
->csum
, state
->csum_size
))
1848 return 0; /* is metadata */
1851 static void btrfsic_process_written_block(struct btrfsic_dev_state
*dev_state
,
1852 u64 dev_bytenr
, char **mapped_datav
,
1853 unsigned int num_pages
,
1854 struct bio
*bio
, int *bio_is_patched
,
1855 struct buffer_head
*bh
,
1856 int submit_bio_bh_rw
)
1859 struct btrfsic_block
*block
;
1860 struct btrfsic_block_data_ctx block_ctx
;
1862 struct btrfsic_state
*state
= dev_state
->state
;
1863 struct block_device
*bdev
= dev_state
->bdev
;
1864 unsigned int processed_len
;
1866 if (NULL
!= bio_is_patched
)
1867 *bio_is_patched
= 0;
1874 is_metadata
= (0 == btrfsic_test_for_metadata(state
, mapped_datav
,
1877 block
= btrfsic_block_hashtable_lookup(bdev
, dev_bytenr
,
1878 &state
->block_hashtable
);
1879 if (NULL
!= block
) {
1881 struct list_head
*elem_ref_to
;
1882 struct list_head
*tmp_ref_to
;
1884 if (block
->is_superblock
) {
1885 bytenr
= le64_to_cpu(((struct btrfs_super_block
*)
1886 mapped_datav
[0])->bytenr
);
1887 if (num_pages
* PAGE_CACHE_SIZE
<
1888 BTRFS_SUPER_INFO_SIZE
) {
1890 "btrfsic: cannot work with too short bios!\n");
1894 BUG_ON(BTRFS_SUPER_INFO_SIZE
& (PAGE_CACHE_SIZE
- 1));
1895 processed_len
= BTRFS_SUPER_INFO_SIZE
;
1896 if (state
->print_mask
&
1897 BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE
) {
1899 "[before new superblock is written]:\n");
1900 btrfsic_dump_tree_sub(state
, block
, 0);
1904 if (!block
->is_superblock
) {
1905 if (num_pages
* PAGE_CACHE_SIZE
<
1906 state
->metablock_size
) {
1908 "btrfsic: cannot work with too short bios!\n");
1911 processed_len
= state
->metablock_size
;
1912 bytenr
= le64_to_cpu(((struct btrfs_header
*)
1913 mapped_datav
[0])->bytenr
);
1914 btrfsic_cmp_log_and_dev_bytenr(state
, bytenr
,
1918 if (block
->logical_bytenr
!= bytenr
) {
1920 "Written block @%llu (%s/%llu/%d)"
1921 " found in hash table, %c,"
1923 " (!= stored %llu).\n",
1924 (unsigned long long)bytenr
,
1926 (unsigned long long)dev_bytenr
,
1928 btrfsic_get_block_type(state
, block
),
1929 (unsigned long long)
1930 block
->logical_bytenr
);
1931 block
->logical_bytenr
= bytenr
;
1932 } else if (state
->print_mask
&
1933 BTRFSIC_PRINT_MASK_VERBOSE
)
1935 "Written block @%llu (%s/%llu/%d)"
1936 " found in hash table, %c.\n",
1937 (unsigned long long)bytenr
,
1939 (unsigned long long)dev_bytenr
,
1941 btrfsic_get_block_type(state
, block
));
1943 if (num_pages
* PAGE_CACHE_SIZE
<
1944 state
->datablock_size
) {
1946 "btrfsic: cannot work with too short bios!\n");
1949 processed_len
= state
->datablock_size
;
1950 bytenr
= block
->logical_bytenr
;
1951 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
1953 "Written block @%llu (%s/%llu/%d)"
1954 " found in hash table, %c.\n",
1955 (unsigned long long)bytenr
,
1957 (unsigned long long)dev_bytenr
,
1959 btrfsic_get_block_type(state
, block
));
1962 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
1964 "ref_to_list: %cE, ref_from_list: %cE\n",
1965 list_empty(&block
->ref_to_list
) ? ' ' : '!',
1966 list_empty(&block
->ref_from_list
) ? ' ' : '!');
1967 if (btrfsic_is_block_ref_by_superblock(state
, block
, 0)) {
1968 printk(KERN_INFO
"btrfs: attempt to overwrite %c-block"
1969 " @%llu (%s/%llu/%d), old(gen=%llu,"
1970 " objectid=%llu, type=%d, offset=%llu),"
1972 " which is referenced by most recent superblock"
1973 " (superblockgen=%llu)!\n",
1974 btrfsic_get_block_type(state
, block
),
1975 (unsigned long long)bytenr
,
1977 (unsigned long long)dev_bytenr
,
1979 (unsigned long long)block
->generation
,
1980 (unsigned long long)
1981 le64_to_cpu(block
->disk_key
.objectid
),
1982 block
->disk_key
.type
,
1983 (unsigned long long)
1984 le64_to_cpu(block
->disk_key
.offset
),
1985 (unsigned long long)
1986 le64_to_cpu(((struct btrfs_header
*)
1987 mapped_datav
[0])->generation
),
1988 (unsigned long long)
1989 state
->max_superblock_generation
);
1990 btrfsic_dump_tree(state
);
1993 if (!block
->is_iodone
&& !block
->never_written
) {
1994 printk(KERN_INFO
"btrfs: attempt to overwrite %c-block"
1995 " @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu,"
1996 " which is not yet iodone!\n",
1997 btrfsic_get_block_type(state
, block
),
1998 (unsigned long long)bytenr
,
2000 (unsigned long long)dev_bytenr
,
2002 (unsigned long long)block
->generation
,
2003 (unsigned long long)
2004 le64_to_cpu(((struct btrfs_header
*)
2005 mapped_datav
[0])->generation
));
2006 /* it would not be safe to go on */
2007 btrfsic_dump_tree(state
);
2012 * Clear all references of this block. Do not free
2013 * the block itself even if is not referenced anymore
2014 * because it still carries valueable information
2015 * like whether it was ever written and IO completed.
2017 list_for_each_safe(elem_ref_to
, tmp_ref_to
,
2018 &block
->ref_to_list
) {
2019 struct btrfsic_block_link
*const l
=
2020 list_entry(elem_ref_to
,
2021 struct btrfsic_block_link
,
2024 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
2025 btrfsic_print_rem_link(state
, l
);
2027 if (0 == l
->ref_cnt
) {
2028 list_del(&l
->node_ref_to
);
2029 list_del(&l
->node_ref_from
);
2030 btrfsic_block_link_hashtable_remove(l
);
2031 btrfsic_block_link_free(l
);
2035 if (block
->is_superblock
)
2036 ret
= btrfsic_map_superblock(state
, bytenr
,
2040 ret
= btrfsic_map_block(state
, bytenr
, processed_len
,
2044 "btrfsic: btrfsic_map_block(root @%llu)"
2045 " failed!\n", (unsigned long long)bytenr
);
2048 block_ctx
.datav
= mapped_datav
;
2049 /* the following is required in case of writes to mirrors,
2050 * use the same that was used for the lookup */
2051 block_ctx
.dev
= dev_state
;
2052 block_ctx
.dev_bytenr
= dev_bytenr
;
2054 if (is_metadata
|| state
->include_extent_data
) {
2055 block
->never_written
= 0;
2056 block
->iodone_w_error
= 0;
2058 block
->is_iodone
= 0;
2059 BUG_ON(NULL
== bio_is_patched
);
2060 if (!*bio_is_patched
) {
2061 block
->orig_bio_bh_private
=
2063 block
->orig_bio_bh_end_io
.bio
=
2065 block
->next_in_same_bio
= NULL
;
2066 bio
->bi_private
= block
;
2067 bio
->bi_end_io
= btrfsic_bio_end_io
;
2068 *bio_is_patched
= 1;
2070 struct btrfsic_block
*chained_block
=
2071 (struct btrfsic_block
*)
2074 BUG_ON(NULL
== chained_block
);
2075 block
->orig_bio_bh_private
=
2076 chained_block
->orig_bio_bh_private
;
2077 block
->orig_bio_bh_end_io
.bio
=
2078 chained_block
->orig_bio_bh_end_io
.
2080 block
->next_in_same_bio
= chained_block
;
2081 bio
->bi_private
= block
;
2083 } else if (NULL
!= bh
) {
2084 block
->is_iodone
= 0;
2085 block
->orig_bio_bh_private
= bh
->b_private
;
2086 block
->orig_bio_bh_end_io
.bh
= bh
->b_end_io
;
2087 block
->next_in_same_bio
= NULL
;
2088 bh
->b_private
= block
;
2089 bh
->b_end_io
= btrfsic_bh_end_io
;
2091 block
->is_iodone
= 1;
2092 block
->orig_bio_bh_private
= NULL
;
2093 block
->orig_bio_bh_end_io
.bio
= NULL
;
2094 block
->next_in_same_bio
= NULL
;
2098 block
->flush_gen
= dev_state
->last_flush_gen
+ 1;
2099 block
->submit_bio_bh_rw
= submit_bio_bh_rw
;
2101 block
->logical_bytenr
= bytenr
;
2102 block
->is_metadata
= 1;
2103 if (block
->is_superblock
) {
2104 BUG_ON(PAGE_CACHE_SIZE
!=
2105 BTRFS_SUPER_INFO_SIZE
);
2106 ret
= btrfsic_process_written_superblock(
2109 (struct btrfs_super_block
*)
2111 if (state
->print_mask
&
2112 BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE
) {
2114 "[after new superblock is written]:\n");
2115 btrfsic_dump_tree_sub(state
, block
, 0);
2118 block
->mirror_num
= 0; /* unknown */
2119 ret
= btrfsic_process_metablock(
2127 "btrfsic: btrfsic_process_metablock"
2128 "(root @%llu) failed!\n",
2129 (unsigned long long)dev_bytenr
);
2131 block
->is_metadata
= 0;
2132 block
->mirror_num
= 0; /* unknown */
2133 block
->generation
= BTRFSIC_GENERATION_UNKNOWN
;
2134 if (!state
->include_extent_data
2135 && list_empty(&block
->ref_from_list
)) {
2137 * disk block is overwritten with extent
2138 * data (not meta data) and we are configured
2139 * to not include extent data: take the
2140 * chance and free the block's memory
2142 btrfsic_block_hashtable_remove(block
);
2143 list_del(&block
->all_blocks_node
);
2144 btrfsic_block_free(block
);
2147 btrfsic_release_block_ctx(&block_ctx
);
2149 /* block has not been found in hash table */
2153 processed_len
= state
->datablock_size
;
2154 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
2155 printk(KERN_INFO
"Written block (%s/%llu/?)"
2156 " !found in hash table, D.\n",
2158 (unsigned long long)dev_bytenr
);
2159 if (!state
->include_extent_data
) {
2160 /* ignore that written D block */
2164 /* this is getting ugly for the
2165 * include_extent_data case... */
2166 bytenr
= 0; /* unknown */
2167 block_ctx
.start
= bytenr
;
2168 block_ctx
.len
= processed_len
;
2169 block_ctx
.mem_to_free
= NULL
;
2170 block_ctx
.pagev
= NULL
;
2172 processed_len
= state
->metablock_size
;
2173 bytenr
= le64_to_cpu(((struct btrfs_header
*)
2174 mapped_datav
[0])->bytenr
);
2175 btrfsic_cmp_log_and_dev_bytenr(state
, bytenr
, dev_state
,
2177 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
2179 "Written block @%llu (%s/%llu/?)"
2180 " !found in hash table, M.\n",
2181 (unsigned long long)bytenr
,
2183 (unsigned long long)dev_bytenr
);
2185 ret
= btrfsic_map_block(state
, bytenr
, processed_len
,
2189 "btrfsic: btrfsic_map_block(root @%llu)"
2191 (unsigned long long)dev_bytenr
);
2195 block_ctx
.datav
= mapped_datav
;
2196 /* the following is required in case of writes to mirrors,
2197 * use the same that was used for the lookup */
2198 block_ctx
.dev
= dev_state
;
2199 block_ctx
.dev_bytenr
= dev_bytenr
;
2201 block
= btrfsic_block_alloc();
2202 if (NULL
== block
) {
2203 printk(KERN_INFO
"btrfsic: error, kmalloc failed!\n");
2204 btrfsic_release_block_ctx(&block_ctx
);
2207 block
->dev_state
= dev_state
;
2208 block
->dev_bytenr
= dev_bytenr
;
2209 block
->logical_bytenr
= bytenr
;
2210 block
->is_metadata
= is_metadata
;
2211 block
->never_written
= 0;
2212 block
->iodone_w_error
= 0;
2213 block
->mirror_num
= 0; /* unknown */
2214 block
->flush_gen
= dev_state
->last_flush_gen
+ 1;
2215 block
->submit_bio_bh_rw
= submit_bio_bh_rw
;
2217 block
->is_iodone
= 0;
2218 BUG_ON(NULL
== bio_is_patched
);
2219 if (!*bio_is_patched
) {
2220 block
->orig_bio_bh_private
= bio
->bi_private
;
2221 block
->orig_bio_bh_end_io
.bio
= bio
->bi_end_io
;
2222 block
->next_in_same_bio
= NULL
;
2223 bio
->bi_private
= block
;
2224 bio
->bi_end_io
= btrfsic_bio_end_io
;
2225 *bio_is_patched
= 1;
2227 struct btrfsic_block
*chained_block
=
2228 (struct btrfsic_block
*)
2231 BUG_ON(NULL
== chained_block
);
2232 block
->orig_bio_bh_private
=
2233 chained_block
->orig_bio_bh_private
;
2234 block
->orig_bio_bh_end_io
.bio
=
2235 chained_block
->orig_bio_bh_end_io
.bio
;
2236 block
->next_in_same_bio
= chained_block
;
2237 bio
->bi_private
= block
;
2239 } else if (NULL
!= bh
) {
2240 block
->is_iodone
= 0;
2241 block
->orig_bio_bh_private
= bh
->b_private
;
2242 block
->orig_bio_bh_end_io
.bh
= bh
->b_end_io
;
2243 block
->next_in_same_bio
= NULL
;
2244 bh
->b_private
= block
;
2245 bh
->b_end_io
= btrfsic_bh_end_io
;
2247 block
->is_iodone
= 1;
2248 block
->orig_bio_bh_private
= NULL
;
2249 block
->orig_bio_bh_end_io
.bio
= NULL
;
2250 block
->next_in_same_bio
= NULL
;
2252 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
2254 "New written %c-block @%llu (%s/%llu/%d)\n",
2255 is_metadata
? 'M' : 'D',
2256 (unsigned long long)block
->logical_bytenr
,
2257 block
->dev_state
->name
,
2258 (unsigned long long)block
->dev_bytenr
,
2260 list_add(&block
->all_blocks_node
, &state
->all_blocks_list
);
2261 btrfsic_block_hashtable_add(block
, &state
->block_hashtable
);
2264 ret
= btrfsic_process_metablock(state
, block
,
2268 "btrfsic: process_metablock(root @%llu)"
2270 (unsigned long long)dev_bytenr
);
2272 btrfsic_release_block_ctx(&block_ctx
);
2276 BUG_ON(!processed_len
);
2277 dev_bytenr
+= processed_len
;
2278 mapped_datav
+= processed_len
>> PAGE_CACHE_SHIFT
;
2279 num_pages
-= processed_len
>> PAGE_CACHE_SHIFT
;
2283 static void btrfsic_bio_end_io(struct bio
*bp
, int bio_error_status
)
2285 struct btrfsic_block
*block
= (struct btrfsic_block
*)bp
->bi_private
;
2288 /* mutex is not held! This is not save if IO is not yet completed
2291 if (bio_error_status
)
2294 BUG_ON(NULL
== block
);
2295 bp
->bi_private
= block
->orig_bio_bh_private
;
2296 bp
->bi_end_io
= block
->orig_bio_bh_end_io
.bio
;
2299 struct btrfsic_block
*next_block
;
2300 struct btrfsic_dev_state
*const dev_state
= block
->dev_state
;
2302 if ((dev_state
->state
->print_mask
&
2303 BTRFSIC_PRINT_MASK_END_IO_BIO_BH
))
2305 "bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
2307 btrfsic_get_block_type(dev_state
->state
, block
),
2308 (unsigned long long)block
->logical_bytenr
,
2310 (unsigned long long)block
->dev_bytenr
,
2312 next_block
= block
->next_in_same_bio
;
2313 block
->iodone_w_error
= iodone_w_error
;
2314 if (block
->submit_bio_bh_rw
& REQ_FLUSH
) {
2315 dev_state
->last_flush_gen
++;
2316 if ((dev_state
->state
->print_mask
&
2317 BTRFSIC_PRINT_MASK_END_IO_BIO_BH
))
2319 "bio_end_io() new %s flush_gen=%llu\n",
2321 (unsigned long long)
2322 dev_state
->last_flush_gen
);
2324 if (block
->submit_bio_bh_rw
& REQ_FUA
)
2325 block
->flush_gen
= 0; /* FUA completed means block is
2327 block
->is_iodone
= 1; /* for FLUSH, this releases the block */
2329 } while (NULL
!= block
);
2331 bp
->bi_end_io(bp
, bio_error_status
);
2334 static void btrfsic_bh_end_io(struct buffer_head
*bh
, int uptodate
)
2336 struct btrfsic_block
*block
= (struct btrfsic_block
*)bh
->b_private
;
2337 int iodone_w_error
= !uptodate
;
2338 struct btrfsic_dev_state
*dev_state
;
2340 BUG_ON(NULL
== block
);
2341 dev_state
= block
->dev_state
;
2342 if ((dev_state
->state
->print_mask
& BTRFSIC_PRINT_MASK_END_IO_BIO_BH
))
2344 "bh_end_io(error=%d) for %c @%llu (%s/%llu/%d)\n",
2346 btrfsic_get_block_type(dev_state
->state
, block
),
2347 (unsigned long long)block
->logical_bytenr
,
2348 block
->dev_state
->name
,
2349 (unsigned long long)block
->dev_bytenr
,
2352 block
->iodone_w_error
= iodone_w_error
;
2353 if (block
->submit_bio_bh_rw
& REQ_FLUSH
) {
2354 dev_state
->last_flush_gen
++;
2355 if ((dev_state
->state
->print_mask
&
2356 BTRFSIC_PRINT_MASK_END_IO_BIO_BH
))
2358 "bh_end_io() new %s flush_gen=%llu\n",
2360 (unsigned long long)dev_state
->last_flush_gen
);
2362 if (block
->submit_bio_bh_rw
& REQ_FUA
)
2363 block
->flush_gen
= 0; /* FUA completed means block is on disk */
2365 bh
->b_private
= block
->orig_bio_bh_private
;
2366 bh
->b_end_io
= block
->orig_bio_bh_end_io
.bh
;
2367 block
->is_iodone
= 1; /* for FLUSH, this releases the block */
2368 bh
->b_end_io(bh
, uptodate
);
2371 static int btrfsic_process_written_superblock(
2372 struct btrfsic_state
*state
,
2373 struct btrfsic_block
*const superblock
,
2374 struct btrfs_super_block
*const super_hdr
)
2378 superblock
->generation
= btrfs_super_generation(super_hdr
);
2379 if (!(superblock
->generation
> state
->max_superblock_generation
||
2380 0 == state
->max_superblock_generation
)) {
2381 if (state
->print_mask
& BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE
)
2383 "btrfsic: superblock @%llu (%s/%llu/%d)"
2384 " with old gen %llu <= %llu\n",
2385 (unsigned long long)superblock
->logical_bytenr
,
2386 superblock
->dev_state
->name
,
2387 (unsigned long long)superblock
->dev_bytenr
,
2388 superblock
->mirror_num
,
2389 (unsigned long long)
2390 btrfs_super_generation(super_hdr
),
2391 (unsigned long long)
2392 state
->max_superblock_generation
);
2394 if (state
->print_mask
& BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE
)
2396 "btrfsic: got new superblock @%llu (%s/%llu/%d)"
2397 " with new gen %llu > %llu\n",
2398 (unsigned long long)superblock
->logical_bytenr
,
2399 superblock
->dev_state
->name
,
2400 (unsigned long long)superblock
->dev_bytenr
,
2401 superblock
->mirror_num
,
2402 (unsigned long long)
2403 btrfs_super_generation(super_hdr
),
2404 (unsigned long long)
2405 state
->max_superblock_generation
);
2407 state
->max_superblock_generation
=
2408 btrfs_super_generation(super_hdr
);
2409 state
->latest_superblock
= superblock
;
2412 for (pass
= 0; pass
< 3; pass
++) {
2415 struct btrfsic_block
*next_block
;
2416 struct btrfsic_block_data_ctx tmp_next_block_ctx
;
2417 struct btrfsic_block_link
*l
;
2420 const char *additional_string
= NULL
;
2421 struct btrfs_disk_key tmp_disk_key
;
2423 tmp_disk_key
.type
= BTRFS_ROOT_ITEM_KEY
;
2424 tmp_disk_key
.offset
= 0;
2428 tmp_disk_key
.objectid
=
2429 cpu_to_le64(BTRFS_ROOT_TREE_OBJECTID
);
2430 additional_string
= "root ";
2431 next_bytenr
= btrfs_super_root(super_hdr
);
2432 if (state
->print_mask
&
2433 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION
)
2434 printk(KERN_INFO
"root@%llu\n",
2435 (unsigned long long)next_bytenr
);
2438 tmp_disk_key
.objectid
=
2439 cpu_to_le64(BTRFS_CHUNK_TREE_OBJECTID
);
2440 additional_string
= "chunk ";
2441 next_bytenr
= btrfs_super_chunk_root(super_hdr
);
2442 if (state
->print_mask
&
2443 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION
)
2444 printk(KERN_INFO
"chunk@%llu\n",
2445 (unsigned long long)next_bytenr
);
2448 tmp_disk_key
.objectid
=
2449 cpu_to_le64(BTRFS_TREE_LOG_OBJECTID
);
2450 additional_string
= "log ";
2451 next_bytenr
= btrfs_super_log_root(super_hdr
);
2452 if (0 == next_bytenr
)
2454 if (state
->print_mask
&
2455 BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION
)
2456 printk(KERN_INFO
"log@%llu\n",
2457 (unsigned long long)next_bytenr
);
2462 btrfs_num_copies(&state
->root
->fs_info
->mapping_tree
,
2463 next_bytenr
, BTRFS_SUPER_INFO_SIZE
);
2464 if (state
->print_mask
& BTRFSIC_PRINT_MASK_NUM_COPIES
)
2465 printk(KERN_INFO
"num_copies(log_bytenr=%llu) = %d\n",
2466 (unsigned long long)next_bytenr
, num_copies
);
2467 for (mirror_num
= 1; mirror_num
<= num_copies
; mirror_num
++) {
2470 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
2472 "btrfsic_process_written_superblock("
2473 "mirror_num=%d)\n", mirror_num
);
2474 ret
= btrfsic_map_block(state
, next_bytenr
,
2475 BTRFS_SUPER_INFO_SIZE
,
2476 &tmp_next_block_ctx
,
2480 "btrfsic: btrfsic_map_block(@%llu,"
2481 " mirror=%d) failed!\n",
2482 (unsigned long long)next_bytenr
,
2487 next_block
= btrfsic_block_lookup_or_add(
2489 &tmp_next_block_ctx
,
2494 if (NULL
== next_block
) {
2496 "btrfsic: error, kmalloc failed!\n");
2497 btrfsic_release_block_ctx(&tmp_next_block_ctx
);
2501 next_block
->disk_key
= tmp_disk_key
;
2503 next_block
->generation
=
2504 BTRFSIC_GENERATION_UNKNOWN
;
2505 l
= btrfsic_block_link_lookup_or_add(
2507 &tmp_next_block_ctx
,
2510 BTRFSIC_GENERATION_UNKNOWN
);
2511 btrfsic_release_block_ctx(&tmp_next_block_ctx
);
2517 if (-1 == btrfsic_check_all_ref_blocks(state
, superblock
, 0)) {
2519 btrfsic_dump_tree(state
);
2525 static int btrfsic_check_all_ref_blocks(struct btrfsic_state
*state
,
2526 struct btrfsic_block
*const block
,
2527 int recursion_level
)
2529 struct list_head
*elem_ref_to
;
2532 if (recursion_level
>= 3 + BTRFS_MAX_LEVEL
) {
2534 * Note that this situation can happen and does not
2535 * indicate an error in regular cases. It happens
2536 * when disk blocks are freed and later reused.
2537 * The check-integrity module is not aware of any
2538 * block free operations, it just recognizes block
2539 * write operations. Therefore it keeps the linkage
2540 * information for a block until a block is
2541 * rewritten. This can temporarily cause incorrect
2542 * and even circular linkage informations. This
2543 * causes no harm unless such blocks are referenced
2544 * by the most recent super block.
2546 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
2548 "btrfsic: abort cyclic linkage (case 1).\n");
2554 * This algorithm is recursive because the amount of used stack
2555 * space is very small and the max recursion depth is limited.
2557 list_for_each(elem_ref_to
, &block
->ref_to_list
) {
2558 const struct btrfsic_block_link
*const l
=
2559 list_entry(elem_ref_to
, struct btrfsic_block_link
,
2562 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
2564 "rl=%d, %c @%llu (%s/%llu/%d)"
2565 " %u* refers to %c @%llu (%s/%llu/%d)\n",
2567 btrfsic_get_block_type(state
, block
),
2568 (unsigned long long)block
->logical_bytenr
,
2569 block
->dev_state
->name
,
2570 (unsigned long long)block
->dev_bytenr
,
2573 btrfsic_get_block_type(state
, l
->block_ref_to
),
2574 (unsigned long long)
2575 l
->block_ref_to
->logical_bytenr
,
2576 l
->block_ref_to
->dev_state
->name
,
2577 (unsigned long long)l
->block_ref_to
->dev_bytenr
,
2578 l
->block_ref_to
->mirror_num
);
2579 if (l
->block_ref_to
->never_written
) {
2580 printk(KERN_INFO
"btrfs: attempt to write superblock"
2581 " which references block %c @%llu (%s/%llu/%d)"
2582 " which is never written!\n",
2583 btrfsic_get_block_type(state
, l
->block_ref_to
),
2584 (unsigned long long)
2585 l
->block_ref_to
->logical_bytenr
,
2586 l
->block_ref_to
->dev_state
->name
,
2587 (unsigned long long)l
->block_ref_to
->dev_bytenr
,
2588 l
->block_ref_to
->mirror_num
);
2590 } else if (!l
->block_ref_to
->is_iodone
) {
2591 printk(KERN_INFO
"btrfs: attempt to write superblock"
2592 " which references block %c @%llu (%s/%llu/%d)"
2593 " which is not yet iodone!\n",
2594 btrfsic_get_block_type(state
, l
->block_ref_to
),
2595 (unsigned long long)
2596 l
->block_ref_to
->logical_bytenr
,
2597 l
->block_ref_to
->dev_state
->name
,
2598 (unsigned long long)l
->block_ref_to
->dev_bytenr
,
2599 l
->block_ref_to
->mirror_num
);
2601 } else if (l
->parent_generation
!=
2602 l
->block_ref_to
->generation
&&
2603 BTRFSIC_GENERATION_UNKNOWN
!=
2604 l
->parent_generation
&&
2605 BTRFSIC_GENERATION_UNKNOWN
!=
2606 l
->block_ref_to
->generation
) {
2607 printk(KERN_INFO
"btrfs: attempt to write superblock"
2608 " which references block %c @%llu (%s/%llu/%d)"
2609 " with generation %llu !="
2610 " parent generation %llu!\n",
2611 btrfsic_get_block_type(state
, l
->block_ref_to
),
2612 (unsigned long long)
2613 l
->block_ref_to
->logical_bytenr
,
2614 l
->block_ref_to
->dev_state
->name
,
2615 (unsigned long long)l
->block_ref_to
->dev_bytenr
,
2616 l
->block_ref_to
->mirror_num
,
2617 (unsigned long long)l
->block_ref_to
->generation
,
2618 (unsigned long long)l
->parent_generation
);
2620 } else if (l
->block_ref_to
->flush_gen
>
2621 l
->block_ref_to
->dev_state
->last_flush_gen
) {
2622 printk(KERN_INFO
"btrfs: attempt to write superblock"
2623 " which references block %c @%llu (%s/%llu/%d)"
2624 " which is not flushed out of disk's write cache"
2625 " (block flush_gen=%llu,"
2626 " dev->flush_gen=%llu)!\n",
2627 btrfsic_get_block_type(state
, l
->block_ref_to
),
2628 (unsigned long long)
2629 l
->block_ref_to
->logical_bytenr
,
2630 l
->block_ref_to
->dev_state
->name
,
2631 (unsigned long long)l
->block_ref_to
->dev_bytenr
,
2632 l
->block_ref_to
->mirror_num
,
2633 (unsigned long long)block
->flush_gen
,
2634 (unsigned long long)
2635 l
->block_ref_to
->dev_state
->last_flush_gen
);
2637 } else if (-1 == btrfsic_check_all_ref_blocks(state
,
2648 static int btrfsic_is_block_ref_by_superblock(
2649 const struct btrfsic_state
*state
,
2650 const struct btrfsic_block
*block
,
2651 int recursion_level
)
2653 struct list_head
*elem_ref_from
;
2655 if (recursion_level
>= 3 + BTRFS_MAX_LEVEL
) {
2656 /* refer to comment at "abort cyclic linkage (case 1)" */
2657 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
2659 "btrfsic: abort cyclic linkage (case 2).\n");
2665 * This algorithm is recursive because the amount of used stack space
2666 * is very small and the max recursion depth is limited.
2668 list_for_each(elem_ref_from
, &block
->ref_from_list
) {
2669 const struct btrfsic_block_link
*const l
=
2670 list_entry(elem_ref_from
, struct btrfsic_block_link
,
2673 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
2675 "rl=%d, %c @%llu (%s/%llu/%d)"
2676 " is ref %u* from %c @%llu (%s/%llu/%d)\n",
2678 btrfsic_get_block_type(state
, block
),
2679 (unsigned long long)block
->logical_bytenr
,
2680 block
->dev_state
->name
,
2681 (unsigned long long)block
->dev_bytenr
,
2684 btrfsic_get_block_type(state
, l
->block_ref_from
),
2685 (unsigned long long)
2686 l
->block_ref_from
->logical_bytenr
,
2687 l
->block_ref_from
->dev_state
->name
,
2688 (unsigned long long)
2689 l
->block_ref_from
->dev_bytenr
,
2690 l
->block_ref_from
->mirror_num
);
2691 if (l
->block_ref_from
->is_superblock
&&
2692 state
->latest_superblock
->dev_bytenr
==
2693 l
->block_ref_from
->dev_bytenr
&&
2694 state
->latest_superblock
->dev_state
->bdev
==
2695 l
->block_ref_from
->dev_state
->bdev
)
2697 else if (btrfsic_is_block_ref_by_superblock(state
,
2707 static void btrfsic_print_add_link(const struct btrfsic_state
*state
,
2708 const struct btrfsic_block_link
*l
)
2711 "Add %u* link from %c @%llu (%s/%llu/%d)"
2712 " to %c @%llu (%s/%llu/%d).\n",
2714 btrfsic_get_block_type(state
, l
->block_ref_from
),
2715 (unsigned long long)l
->block_ref_from
->logical_bytenr
,
2716 l
->block_ref_from
->dev_state
->name
,
2717 (unsigned long long)l
->block_ref_from
->dev_bytenr
,
2718 l
->block_ref_from
->mirror_num
,
2719 btrfsic_get_block_type(state
, l
->block_ref_to
),
2720 (unsigned long long)l
->block_ref_to
->logical_bytenr
,
2721 l
->block_ref_to
->dev_state
->name
,
2722 (unsigned long long)l
->block_ref_to
->dev_bytenr
,
2723 l
->block_ref_to
->mirror_num
);
2726 static void btrfsic_print_rem_link(const struct btrfsic_state
*state
,
2727 const struct btrfsic_block_link
*l
)
2730 "Rem %u* link from %c @%llu (%s/%llu/%d)"
2731 " to %c @%llu (%s/%llu/%d).\n",
2733 btrfsic_get_block_type(state
, l
->block_ref_from
),
2734 (unsigned long long)l
->block_ref_from
->logical_bytenr
,
2735 l
->block_ref_from
->dev_state
->name
,
2736 (unsigned long long)l
->block_ref_from
->dev_bytenr
,
2737 l
->block_ref_from
->mirror_num
,
2738 btrfsic_get_block_type(state
, l
->block_ref_to
),
2739 (unsigned long long)l
->block_ref_to
->logical_bytenr
,
2740 l
->block_ref_to
->dev_state
->name
,
2741 (unsigned long long)l
->block_ref_to
->dev_bytenr
,
2742 l
->block_ref_to
->mirror_num
);
2745 static char btrfsic_get_block_type(const struct btrfsic_state
*state
,
2746 const struct btrfsic_block
*block
)
2748 if (block
->is_superblock
&&
2749 state
->latest_superblock
->dev_bytenr
== block
->dev_bytenr
&&
2750 state
->latest_superblock
->dev_state
->bdev
== block
->dev_state
->bdev
)
2752 else if (block
->is_superblock
)
2754 else if (block
->is_metadata
)
2760 static void btrfsic_dump_tree(const struct btrfsic_state
*state
)
2762 btrfsic_dump_tree_sub(state
, state
->latest_superblock
, 0);
2765 static void btrfsic_dump_tree_sub(const struct btrfsic_state
*state
,
2766 const struct btrfsic_block
*block
,
2769 struct list_head
*elem_ref_to
;
2771 static char buf
[80];
2772 int cursor_position
;
2775 * Should better fill an on-stack buffer with a complete line and
2776 * dump it at once when it is time to print a newline character.
2780 * This algorithm is recursive because the amount of used stack space
2781 * is very small and the max recursion depth is limited.
2783 indent_add
= sprintf(buf
, "%c-%llu(%s/%llu/%d)",
2784 btrfsic_get_block_type(state
, block
),
2785 (unsigned long long)block
->logical_bytenr
,
2786 block
->dev_state
->name
,
2787 (unsigned long long)block
->dev_bytenr
,
2789 if (indent_level
+ indent_add
> BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL
) {
2794 indent_level
+= indent_add
;
2795 if (list_empty(&block
->ref_to_list
)) {
2799 if (block
->mirror_num
> 1 &&
2800 !(state
->print_mask
& BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS
)) {
2805 cursor_position
= indent_level
;
2806 list_for_each(elem_ref_to
, &block
->ref_to_list
) {
2807 const struct btrfsic_block_link
*const l
=
2808 list_entry(elem_ref_to
, struct btrfsic_block_link
,
2811 while (cursor_position
< indent_level
) {
2816 indent_add
= sprintf(buf
, " %d*--> ", l
->ref_cnt
);
2818 indent_add
= sprintf(buf
, " --> ");
2819 if (indent_level
+ indent_add
>
2820 BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL
) {
2822 cursor_position
= 0;
2828 btrfsic_dump_tree_sub(state
, l
->block_ref_to
,
2829 indent_level
+ indent_add
);
2830 cursor_position
= 0;
2834 static struct btrfsic_block_link
*btrfsic_block_link_lookup_or_add(
2835 struct btrfsic_state
*state
,
2836 struct btrfsic_block_data_ctx
*next_block_ctx
,
2837 struct btrfsic_block
*next_block
,
2838 struct btrfsic_block
*from_block
,
2839 u64 parent_generation
)
2841 struct btrfsic_block_link
*l
;
2843 l
= btrfsic_block_link_hashtable_lookup(next_block_ctx
->dev
->bdev
,
2844 next_block_ctx
->dev_bytenr
,
2845 from_block
->dev_state
->bdev
,
2846 from_block
->dev_bytenr
,
2847 &state
->block_link_hashtable
);
2849 l
= btrfsic_block_link_alloc();
2852 "btrfsic: error, kmalloc" " failed!\n");
2856 l
->block_ref_to
= next_block
;
2857 l
->block_ref_from
= from_block
;
2859 l
->parent_generation
= parent_generation
;
2861 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
2862 btrfsic_print_add_link(state
, l
);
2864 list_add(&l
->node_ref_to
, &from_block
->ref_to_list
);
2865 list_add(&l
->node_ref_from
, &next_block
->ref_from_list
);
2867 btrfsic_block_link_hashtable_add(l
,
2868 &state
->block_link_hashtable
);
2871 l
->parent_generation
= parent_generation
;
2872 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
2873 btrfsic_print_add_link(state
, l
);
2879 static struct btrfsic_block
*btrfsic_block_lookup_or_add(
2880 struct btrfsic_state
*state
,
2881 struct btrfsic_block_data_ctx
*block_ctx
,
2882 const char *additional_string
,
2889 struct btrfsic_block
*block
;
2891 block
= btrfsic_block_hashtable_lookup(block_ctx
->dev
->bdev
,
2892 block_ctx
->dev_bytenr
,
2893 &state
->block_hashtable
);
2894 if (NULL
== block
) {
2895 struct btrfsic_dev_state
*dev_state
;
2897 block
= btrfsic_block_alloc();
2898 if (NULL
== block
) {
2899 printk(KERN_INFO
"btrfsic: error, kmalloc failed!\n");
2902 dev_state
= btrfsic_dev_state_lookup(block_ctx
->dev
->bdev
);
2903 if (NULL
== dev_state
) {
2905 "btrfsic: error, lookup dev_state failed!\n");
2906 btrfsic_block_free(block
);
2909 block
->dev_state
= dev_state
;
2910 block
->dev_bytenr
= block_ctx
->dev_bytenr
;
2911 block
->logical_bytenr
= block_ctx
->start
;
2912 block
->is_metadata
= is_metadata
;
2913 block
->is_iodone
= is_iodone
;
2914 block
->never_written
= never_written
;
2915 block
->mirror_num
= mirror_num
;
2916 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
2918 "New %s%c-block @%llu (%s/%llu/%d)\n",
2920 btrfsic_get_block_type(state
, block
),
2921 (unsigned long long)block
->logical_bytenr
,
2923 (unsigned long long)block
->dev_bytenr
,
2925 list_add(&block
->all_blocks_node
, &state
->all_blocks_list
);
2926 btrfsic_block_hashtable_add(block
, &state
->block_hashtable
);
2927 if (NULL
!= was_created
)
2930 if (NULL
!= was_created
)
2937 static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state
*state
,
2939 struct btrfsic_dev_state
*dev_state
,
2945 struct btrfsic_block_data_ctx block_ctx
;
2948 num_copies
= btrfs_num_copies(&state
->root
->fs_info
->mapping_tree
,
2949 bytenr
, state
->metablock_size
);
2951 for (mirror_num
= 1; mirror_num
<= num_copies
; mirror_num
++) {
2952 ret
= btrfsic_map_block(state
, bytenr
, state
->metablock_size
,
2953 &block_ctx
, mirror_num
);
2955 printk(KERN_INFO
"btrfsic:"
2956 " btrfsic_map_block(logical @%llu,"
2957 " mirror %d) failed!\n",
2958 (unsigned long long)bytenr
, mirror_num
);
2962 if (dev_state
->bdev
== block_ctx
.dev
->bdev
&&
2963 dev_bytenr
== block_ctx
.dev_bytenr
) {
2965 btrfsic_release_block_ctx(&block_ctx
);
2968 btrfsic_release_block_ctx(&block_ctx
);
2972 printk(KERN_INFO
"btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio,"
2973 " buffer->log_bytenr=%llu, submit_bio(bdev=%s,"
2974 " phys_bytenr=%llu)!\n",
2975 (unsigned long long)bytenr
, dev_state
->name
,
2976 (unsigned long long)dev_bytenr
);
2977 for (mirror_num
= 1; mirror_num
<= num_copies
; mirror_num
++) {
2978 ret
= btrfsic_map_block(state
, bytenr
,
2979 state
->metablock_size
,
2980 &block_ctx
, mirror_num
);
2984 printk(KERN_INFO
"Read logical bytenr @%llu maps to"
2986 (unsigned long long)bytenr
,
2987 block_ctx
.dev
->name
,
2988 (unsigned long long)block_ctx
.dev_bytenr
,
2995 static struct btrfsic_dev_state
*btrfsic_dev_state_lookup(
2996 struct block_device
*bdev
)
2998 struct btrfsic_dev_state
*ds
;
3000 ds
= btrfsic_dev_state_hashtable_lookup(bdev
,
3001 &btrfsic_dev_state_hashtable
);
3005 int btrfsic_submit_bh(int rw
, struct buffer_head
*bh
)
3007 struct btrfsic_dev_state
*dev_state
;
3009 if (!btrfsic_is_initialized
)
3010 return submit_bh(rw
, bh
);
3012 mutex_lock(&btrfsic_mutex
);
3013 /* since btrfsic_submit_bh() might also be called before
3014 * btrfsic_mount(), this might return NULL */
3015 dev_state
= btrfsic_dev_state_lookup(bh
->b_bdev
);
3017 /* Only called to write the superblock (incl. FLUSH/FUA) */
3018 if (NULL
!= dev_state
&&
3019 (rw
& WRITE
) && bh
->b_size
> 0) {
3022 dev_bytenr
= 4096 * bh
->b_blocknr
;
3023 if (dev_state
->state
->print_mask
&
3024 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH
)
3026 "submit_bh(rw=0x%x, blocknr=%lu (bytenr %llu),"
3027 " size=%lu, data=%p, bdev=%p)\n",
3028 rw
, (unsigned long)bh
->b_blocknr
,
3029 (unsigned long long)dev_bytenr
,
3030 (unsigned long)bh
->b_size
, bh
->b_data
,
3032 btrfsic_process_written_block(dev_state
, dev_bytenr
,
3033 &bh
->b_data
, 1, NULL
,
3035 } else if (NULL
!= dev_state
&& (rw
& REQ_FLUSH
)) {
3036 if (dev_state
->state
->print_mask
&
3037 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH
)
3039 "submit_bh(rw=0x%x FLUSH, bdev=%p)\n",
3041 if (!dev_state
->dummy_block_for_bio_bh_flush
.is_iodone
) {
3042 if ((dev_state
->state
->print_mask
&
3043 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH
|
3044 BTRFSIC_PRINT_MASK_VERBOSE
)))
3046 "btrfsic_submit_bh(%s) with FLUSH"
3047 " but dummy block already in use"
3051 struct btrfsic_block
*const block
=
3052 &dev_state
->dummy_block_for_bio_bh_flush
;
3054 block
->is_iodone
= 0;
3055 block
->never_written
= 0;
3056 block
->iodone_w_error
= 0;
3057 block
->flush_gen
= dev_state
->last_flush_gen
+ 1;
3058 block
->submit_bio_bh_rw
= rw
;
3059 block
->orig_bio_bh_private
= bh
->b_private
;
3060 block
->orig_bio_bh_end_io
.bh
= bh
->b_end_io
;
3061 block
->next_in_same_bio
= NULL
;
3062 bh
->b_private
= block
;
3063 bh
->b_end_io
= btrfsic_bh_end_io
;
3066 mutex_unlock(&btrfsic_mutex
);
3067 return submit_bh(rw
, bh
);
3070 void btrfsic_submit_bio(int rw
, struct bio
*bio
)
3072 struct btrfsic_dev_state
*dev_state
;
3074 if (!btrfsic_is_initialized
) {
3075 submit_bio(rw
, bio
);
3079 mutex_lock(&btrfsic_mutex
);
3080 /* since btrfsic_submit_bio() is also called before
3081 * btrfsic_mount(), this might return NULL */
3082 dev_state
= btrfsic_dev_state_lookup(bio
->bi_bdev
);
3083 if (NULL
!= dev_state
&&
3084 (rw
& WRITE
) && NULL
!= bio
->bi_io_vec
) {
3088 char **mapped_datav
;
3090 dev_bytenr
= 512 * bio
->bi_sector
;
3092 if (dev_state
->state
->print_mask
&
3093 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH
)
3095 "submit_bio(rw=0x%x, bi_vcnt=%u,"
3096 " bi_sector=%lu (bytenr %llu), bi_bdev=%p)\n",
3097 rw
, bio
->bi_vcnt
, (unsigned long)bio
->bi_sector
,
3098 (unsigned long long)dev_bytenr
,
3101 mapped_datav
= kmalloc(sizeof(*mapped_datav
) * bio
->bi_vcnt
,
3105 for (i
= 0; i
< bio
->bi_vcnt
; i
++) {
3106 BUG_ON(bio
->bi_io_vec
[i
].bv_len
!= PAGE_CACHE_SIZE
);
3107 mapped_datav
[i
] = kmap(bio
->bi_io_vec
[i
].bv_page
);
3108 if (!mapped_datav
[i
]) {
3111 kunmap(bio
->bi_io_vec
[i
].bv_page
);
3113 kfree(mapped_datav
);
3116 if ((BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH
|
3117 BTRFSIC_PRINT_MASK_VERBOSE
) ==
3118 (dev_state
->state
->print_mask
&
3119 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH
|
3120 BTRFSIC_PRINT_MASK_VERBOSE
)))
3122 "#%u: page=%p, len=%u, offset=%u\n",
3123 i
, bio
->bi_io_vec
[i
].bv_page
,
3124 bio
->bi_io_vec
[i
].bv_len
,
3125 bio
->bi_io_vec
[i
].bv_offset
);
3127 btrfsic_process_written_block(dev_state
, dev_bytenr
,
3128 mapped_datav
, bio
->bi_vcnt
,
3129 bio
, &bio_is_patched
,
3133 kunmap(bio
->bi_io_vec
[i
].bv_page
);
3135 kfree(mapped_datav
);
3136 } else if (NULL
!= dev_state
&& (rw
& REQ_FLUSH
)) {
3137 if (dev_state
->state
->print_mask
&
3138 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH
)
3140 "submit_bio(rw=0x%x FLUSH, bdev=%p)\n",
3142 if (!dev_state
->dummy_block_for_bio_bh_flush
.is_iodone
) {
3143 if ((dev_state
->state
->print_mask
&
3144 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH
|
3145 BTRFSIC_PRINT_MASK_VERBOSE
)))
3147 "btrfsic_submit_bio(%s) with FLUSH"
3148 " but dummy block already in use"
3152 struct btrfsic_block
*const block
=
3153 &dev_state
->dummy_block_for_bio_bh_flush
;
3155 block
->is_iodone
= 0;
3156 block
->never_written
= 0;
3157 block
->iodone_w_error
= 0;
3158 block
->flush_gen
= dev_state
->last_flush_gen
+ 1;
3159 block
->submit_bio_bh_rw
= rw
;
3160 block
->orig_bio_bh_private
= bio
->bi_private
;
3161 block
->orig_bio_bh_end_io
.bio
= bio
->bi_end_io
;
3162 block
->next_in_same_bio
= NULL
;
3163 bio
->bi_private
= block
;
3164 bio
->bi_end_io
= btrfsic_bio_end_io
;
3168 mutex_unlock(&btrfsic_mutex
);
3170 submit_bio(rw
, bio
);
3173 int btrfsic_mount(struct btrfs_root
*root
,
3174 struct btrfs_fs_devices
*fs_devices
,
3175 int including_extent_data
, u32 print_mask
)
3178 struct btrfsic_state
*state
;
3179 struct list_head
*dev_head
= &fs_devices
->devices
;
3180 struct btrfs_device
*device
;
3182 if (root
->nodesize
!= root
->leafsize
) {
3184 "btrfsic: cannot handle nodesize %d != leafsize %d!\n",
3185 root
->nodesize
, root
->leafsize
);
3188 if (root
->nodesize
& ((u64
)PAGE_CACHE_SIZE
- 1)) {
3190 "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
3191 root
->nodesize
, (unsigned long)PAGE_CACHE_SIZE
);
3194 if (root
->leafsize
& ((u64
)PAGE_CACHE_SIZE
- 1)) {
3196 "btrfsic: cannot handle leafsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
3197 root
->leafsize
, (unsigned long)PAGE_CACHE_SIZE
);
3200 if (root
->sectorsize
& ((u64
)PAGE_CACHE_SIZE
- 1)) {
3202 "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
3203 root
->sectorsize
, (unsigned long)PAGE_CACHE_SIZE
);
3206 state
= kzalloc(sizeof(*state
), GFP_NOFS
);
3207 if (NULL
== state
) {
3208 printk(KERN_INFO
"btrfs check-integrity: kmalloc() failed!\n");
3212 if (!btrfsic_is_initialized
) {
3213 mutex_init(&btrfsic_mutex
);
3214 btrfsic_dev_state_hashtable_init(&btrfsic_dev_state_hashtable
);
3215 btrfsic_is_initialized
= 1;
3217 mutex_lock(&btrfsic_mutex
);
3219 state
->print_mask
= print_mask
;
3220 state
->include_extent_data
= including_extent_data
;
3221 state
->csum_size
= 0;
3222 state
->metablock_size
= root
->nodesize
;
3223 state
->datablock_size
= root
->sectorsize
;
3224 INIT_LIST_HEAD(&state
->all_blocks_list
);
3225 btrfsic_block_hashtable_init(&state
->block_hashtable
);
3226 btrfsic_block_link_hashtable_init(&state
->block_link_hashtable
);
3227 state
->max_superblock_generation
= 0;
3228 state
->latest_superblock
= NULL
;
3230 list_for_each_entry(device
, dev_head
, dev_list
) {
3231 struct btrfsic_dev_state
*ds
;
3234 if (!device
->bdev
|| !device
->name
)
3237 ds
= btrfsic_dev_state_alloc();
3240 "btrfs check-integrity: kmalloc() failed!\n");
3241 mutex_unlock(&btrfsic_mutex
);
3244 ds
->bdev
= device
->bdev
;
3246 bdevname(ds
->bdev
, ds
->name
);
3247 ds
->name
[BDEVNAME_SIZE
- 1] = '\0';
3248 for (p
= ds
->name
; *p
!= '\0'; p
++);
3249 while (p
> ds
->name
&& *p
!= '/')
3253 strlcpy(ds
->name
, p
, sizeof(ds
->name
));
3254 btrfsic_dev_state_hashtable_add(ds
,
3255 &btrfsic_dev_state_hashtable
);
3258 ret
= btrfsic_process_superblock(state
, fs_devices
);
3260 mutex_unlock(&btrfsic_mutex
);
3261 btrfsic_unmount(root
, fs_devices
);
3265 if (state
->print_mask
& BTRFSIC_PRINT_MASK_INITIAL_DATABASE
)
3266 btrfsic_dump_database(state
);
3267 if (state
->print_mask
& BTRFSIC_PRINT_MASK_INITIAL_TREE
)
3268 btrfsic_dump_tree(state
);
3270 mutex_unlock(&btrfsic_mutex
);
3274 void btrfsic_unmount(struct btrfs_root
*root
,
3275 struct btrfs_fs_devices
*fs_devices
)
3277 struct list_head
*elem_all
;
3278 struct list_head
*tmp_all
;
3279 struct btrfsic_state
*state
;
3280 struct list_head
*dev_head
= &fs_devices
->devices
;
3281 struct btrfs_device
*device
;
3283 if (!btrfsic_is_initialized
)
3286 mutex_lock(&btrfsic_mutex
);
3289 list_for_each_entry(device
, dev_head
, dev_list
) {
3290 struct btrfsic_dev_state
*ds
;
3292 if (!device
->bdev
|| !device
->name
)
3295 ds
= btrfsic_dev_state_hashtable_lookup(
3297 &btrfsic_dev_state_hashtable
);
3300 btrfsic_dev_state_hashtable_remove(ds
);
3301 btrfsic_dev_state_free(ds
);
3305 if (NULL
== state
) {
3307 "btrfsic: error, cannot find state information"
3309 mutex_unlock(&btrfsic_mutex
);
3314 * Don't care about keeping the lists' state up to date,
3315 * just free all memory that was allocated dynamically.
3316 * Free the blocks and the block_links.
3318 list_for_each_safe(elem_all
, tmp_all
, &state
->all_blocks_list
) {
3319 struct btrfsic_block
*const b_all
=
3320 list_entry(elem_all
, struct btrfsic_block
,
3322 struct list_head
*elem_ref_to
;
3323 struct list_head
*tmp_ref_to
;
3325 list_for_each_safe(elem_ref_to
, tmp_ref_to
,
3326 &b_all
->ref_to_list
) {
3327 struct btrfsic_block_link
*const l
=
3328 list_entry(elem_ref_to
,
3329 struct btrfsic_block_link
,
3332 if (state
->print_mask
& BTRFSIC_PRINT_MASK_VERBOSE
)
3333 btrfsic_print_rem_link(state
, l
);
3336 if (0 == l
->ref_cnt
)
3337 btrfsic_block_link_free(l
);
3340 if (b_all
->is_iodone
|| b_all
->never_written
)
3341 btrfsic_block_free(b_all
);
3343 printk(KERN_INFO
"btrfs: attempt to free %c-block"
3344 " @%llu (%s/%llu/%d) on umount which is"
3345 " not yet iodone!\n",
3346 btrfsic_get_block_type(state
, b_all
),
3347 (unsigned long long)b_all
->logical_bytenr
,
3348 b_all
->dev_state
->name
,
3349 (unsigned long long)b_all
->dev_bytenr
,
3353 mutex_unlock(&btrfsic_mutex
);