/*
 * recovery.c - NILFS recovery logic
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */

#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
#include "sufile.h"
#include "page.h"
#include "segbuf.h"

/*
 * Segment check result
 */
enum {
	NILFS_SEG_VALID,
	NILFS_SEG_NO_SUPER_ROOT,
	NILFS_SEG_FAIL_IO,
	NILFS_SEG_FAIL_MAGIC,
	NILFS_SEG_FAIL_SEQ,
	NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,
	NILFS_SEG_FAIL_CHECKSUM_FULL,
	NILFS_SEG_FAIL_CONSISTENCY,
};

/* work structure for recovery */
struct nilfs_recovery_block {
	ino_t ino;		/* Inode number of the file that this block
				   belongs to */
	sector_t blocknr;	/* block number */
	__u64 vblocknr;		/* virtual block number */
	unsigned long blkoff;	/* File offset of the data block (per block) */
	struct list_head list;
};
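
/*
 * nilfs_warn_segment_error() logs why a segment was rejected and converts
 * the segment check result into a negative error code for its callers.
 */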
static int nilfs_warn_segment_error(int err)
{
	switch (err) {
	case NILFS_SEG_FAIL_IO:
		printk(KERN_WARNING
		       "NILFS warning: I/O error on loading last segment\n");
		return -EIO;
	case NILFS_SEG_FAIL_MAGIC:
		printk(KERN_WARNING
		       "NILFS warning: Segment magic number invalid\n");
		break;
	case NILFS_SEG_FAIL_SEQ:
		printk(KERN_WARNING
		       "NILFS warning: Sequence number mismatch\n");
		break;
	case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
		printk(KERN_WARNING
		       "NILFS warning: Checksum error in super root\n");
		break;
	case NILFS_SEG_FAIL_CHECKSUM_FULL:
		printk(KERN_WARNING
		       "NILFS warning: Checksum error in segment payload\n");
		break;
	case NILFS_SEG_FAIL_CONSISTENCY:
		printk(KERN_WARNING
		       "NILFS warning: Inconsistent segment\n");
		break;
	case NILFS_SEG_NO_SUPER_ROOT:
		printk(KERN_WARNING
		       "NILFS warning: No super root in the last segment\n");
		break;
	}
	return -EINVAL;
}
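
/*
 * store_segsum_info() unpacks an on-disk segment summary into host byte
 * order and derives the number of summary blocks (nsumblk) and the number
 * of payload blocks excluding the super root block (nfileblk).
 */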
static void store_segsum_info(struct nilfs_segsum_info *ssi,
			      struct nilfs_segment_summary *sum,
			      unsigned int blocksize)
{
	ssi->flags = le16_to_cpu(sum->ss_flags);
	ssi->seg_seq = le64_to_cpu(sum->ss_seq);
	ssi->ctime = le64_to_cpu(sum->ss_create);
	ssi->next = le64_to_cpu(sum->ss_next);
	ssi->nblocks = le32_to_cpu(sum->ss_nblocks);
	ssi->nfinfo = le32_to_cpu(sum->ss_nfinfo);
	ssi->sumbytes = le32_to_cpu(sum->ss_sumbytes);

	ssi->nsumblk = DIV_ROUND_UP(ssi->sumbytes, blocksize);
	ssi->nfileblk = ssi->nblocks - ssi->nsumblk - !!NILFS_SEG_HAS_SR(ssi);

	/* need to verify ->ss_bytes field if read ->ss_cno */
}

/**
 * calc_crc_cont - check CRC of blocks continuously
 * @sbi: nilfs_sb_info
 * @bhs: buffer head of start block
 * @sum: place to store result
 * @offset: offset bytes in the first block
 * @check_bytes: number of bytes to be checked
 * @start: DBN of start block
 * @nblock: number of blocks to be checked
 */
static int calc_crc_cont(struct nilfs_sb_info *sbi, struct buffer_head *bhs,
			 u32 *sum, unsigned long offset, u64 check_bytes,
			 sector_t start, unsigned long nblock)
{
	unsigned long blocksize = sbi->s_super->s_blocksize;
	unsigned long size;
	u32 crc;

	BUG_ON(offset >= blocksize);
	check_bytes -= offset;
	size = min_t(u64, check_bytes, blocksize - offset);
	crc = crc32_le(sbi->s_nilfs->ns_crc_seed,
		       (unsigned char *)bhs->b_data + offset, size);

	if (--nblock > 0) {
		do {
			struct buffer_head *bh
				= sb_bread(sbi->s_super, ++start);
			if (!bh)
				return -EIO;
			check_bytes -= size;
			size = min_t(u64, check_bytes, blocksize);
			crc = crc32_le(crc, bh->b_data, size);
			brelse(bh);
		} while (--nblock > 0);
	}
	*sum = crc;
	return 0;
}

/**
 * nilfs_read_super_root_block - read super root block
 * @sb: super_block
 * @sr_block: disk block number of the super root block
 * @pbh: address of a buffer_head pointer to return super root buffer
 * @check: CRC check flag
 */
int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block,
				struct buffer_head **pbh, int check)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *sr;
	u32 crc;
	int ret;

	*pbh = NULL;
	bh_sr = sb_bread(sb, sr_block);
	if (unlikely(!bh_sr)) {
		ret = NILFS_SEG_FAIL_IO;
		goto failed;
	}

	sr = (struct nilfs_super_root *)bh_sr->b_data;
	if (check) {
		unsigned bytes = le16_to_cpu(sr->sr_bytes);

		if (bytes == 0 || bytes > sb->s_blocksize) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
		if (calc_crc_cont(NILFS_SB(sb), bh_sr, &crc,
				  sizeof(sr->sr_sum), bytes, sr_block, 1)) {
			ret = NILFS_SEG_FAIL_IO;
			goto failed_bh;
		}
		if (crc != le32_to_cpu(sr->sr_sum)) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
	}
	*pbh = bh_sr;
	return 0;

 failed_bh:
	brelse(bh_sr);

 failed:
	return nilfs_warn_segment_error(ret);
}
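
/*
 * Usage sketch (illustrative only): a caller typically reads the super
 * root with the CRC check enabled and releases the buffer when done, e.g.
 *
 *	struct buffer_head *bh_sr;
 *	int err = nilfs_read_super_root_block(sb, sr_block, &bh_sr, 1);
 *	if (!err) {
 *		struct nilfs_super_root *sr = (void *)bh_sr->b_data;
 *		... use sr ...
 *		brelse(bh_sr);
 *	}
 */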

/**
 * load_segment_summary - read segment summary of the specified partial segment
 * @sbi: nilfs_sb_info
 * @pseg_start: start disk block number of partial segment
 * @seg_seq: sequence number requested
 * @ssi: pointer to nilfs_segsum_info struct to store information
 */
static int
load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start,
		     u64 seg_seq, struct nilfs_segsum_info *ssi)
{
	struct buffer_head *bh_sum;
	struct nilfs_segment_summary *sum;
	unsigned long nblock;
	u32 crc;
	int ret = NILFS_SEG_FAIL_IO;

	bh_sum = sb_bread(sbi->s_super, pseg_start);
	if (!bh_sum)
		goto out;

	sum = (struct nilfs_segment_summary *)bh_sum->b_data;

	/* Check consistency of segment summary */
	if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC) {
		ret = NILFS_SEG_FAIL_MAGIC;
		goto failed;
	}
	store_segsum_info(ssi, sum, sbi->s_super->s_blocksize);
	if (seg_seq != ssi->seg_seq) {
		ret = NILFS_SEG_FAIL_SEQ;
		goto failed;
	}

	nblock = ssi->nblocks;
	if (unlikely(nblock == 0 ||
		     nblock > sbi->s_nilfs->ns_blocks_per_segment)) {
		/* This limits the number of blocks read in the CRC check */
		ret = NILFS_SEG_FAIL_CONSISTENCY;
		goto failed;
	}
	if (calc_crc_cont(sbi, bh_sum, &crc, sizeof(sum->ss_datasum),
			  ((u64)nblock << sbi->s_super->s_blocksize_bits),
			  pseg_start, nblock)) {
		ret = NILFS_SEG_FAIL_IO;
		goto failed;
	}
	if (crc == le32_to_cpu(sum->ss_datasum))
		ret = 0;
	else
		ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
 failed:
	brelse(bh_sum);
 out:
	return ret;
}
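
/*
 * segsum_get() returns a pointer to the next @bytes of summary data at
 * *@offset in the block referenced by *@pbh, reading the following disk
 * block when the remaining bytes of the current block cannot hold the
 * item.  *@offset is advanced past the item; NULL is returned on read
 * failure.
 */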
static void *segsum_get(struct super_block *sb, struct buffer_head **pbh,
			unsigned int *offset, unsigned int bytes)
{
	void *ptr;
	sector_t blocknr;

	BUG_ON((*pbh)->b_size < *offset);
	if (bytes > (*pbh)->b_size - *offset) {
		blocknr = (*pbh)->b_blocknr;
		brelse(*pbh);
		*pbh = sb_bread(sb, blocknr + 1);
		if (unlikely(!*pbh))
			return NULL;
		*offset = 0;
	}
	ptr = (*pbh)->b_data + *offset;
	*offset += bytes;
	return ptr;
}
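
/*
 * segsum_skip() skips @count items of @bytes each in the summary area;
 * when the run of items extends past the current block it releases the
 * current summary block and reads the one holding the next unread item.
 */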
static void segsum_skip(struct super_block *sb, struct buffer_head **pbh,
			unsigned int *offset, unsigned int bytes,
			unsigned long count)
{
	unsigned int rest_item_in_current_block
		= ((*pbh)->b_size - *offset) / bytes;

	if (count <= rest_item_in_current_block) {
		*offset += bytes * count;
	} else {
		sector_t blocknr = (*pbh)->b_blocknr;
		unsigned int nitem_per_block = (*pbh)->b_size / bytes;
		unsigned int bcnt;

		count -= rest_item_in_current_block;
		bcnt = DIV_ROUND_UP(count, nitem_per_block);
		*offset = bytes * (count - (bcnt - 1) * nitem_per_block);

		brelse(*pbh);
		*pbh = sb_bread(sb, blocknr + bcnt);
	}
}
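
/*
 * collect_blocks_from_segsum() walks the finfo/binfo_v entries of the
 * segment summary starting at @sum_blocknr and queues each data block on
 * @head as a struct nilfs_recovery_block work item for later roll forward.
 */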
static int
collect_blocks_from_segsum(struct nilfs_sb_info *sbi, sector_t sum_blocknr,
			   struct nilfs_segsum_info *ssi,
			   struct list_head *head)
{
	struct buffer_head *bh;
	unsigned int offset;
	unsigned long nfinfo = ssi->nfinfo;
	sector_t blocknr = sum_blocknr + ssi->nsumblk;
	ino_t ino;
	int err = -EIO;

	if (!nfinfo)
		return 0;

	bh = sb_bread(sbi->s_super, sum_blocknr);
	if (unlikely(!bh))
		goto out;

	offset = le16_to_cpu(
		((struct nilfs_segment_summary *)bh->b_data)->ss_bytes);
	for (;;) {
		unsigned long nblocks, ndatablk, nnodeblk;
		struct nilfs_finfo *finfo;

		finfo = segsum_get(sbi->s_super, &bh, &offset, sizeof(*finfo));
		if (unlikely(!finfo))
			goto out;

		ino = le64_to_cpu(finfo->fi_ino);
		nblocks = le32_to_cpu(finfo->fi_nblocks);
		ndatablk = le32_to_cpu(finfo->fi_ndatablk);
		nnodeblk = nblocks - ndatablk;

		while (ndatablk-- > 0) {
			struct nilfs_recovery_block *rb;
			struct nilfs_binfo_v *binfo;

			binfo = segsum_get(sbi->s_super, &bh, &offset,
					   sizeof(*binfo));
			if (unlikely(!binfo))
				goto out;

			rb = kmalloc(sizeof(*rb), GFP_NOFS);
			if (unlikely(!rb)) {
				err = -ENOMEM;
				goto out;
			}
			rb->ino = ino;
			rb->blocknr = blocknr++;
			rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr);
			rb->blkoff = le64_to_cpu(binfo->bi_blkoff);
			/* INIT_LIST_HEAD(&rb->list); */
			list_add_tail(&rb->list, head);
		}
		if (--nfinfo == 0)
			break;
		blocknr += nnodeblk; /* always 0 for the data sync segments */
		segsum_skip(sbi->s_super, &bh, &offset, sizeof(__le64),
			    nnodeblk);
	}

	err = 0;
 out:
	brelse(bh); /* brelse(NULL) is just ignored */
	return err;
}
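
/*
 * dispose_recovery_list() releases all recovery work items remaining on
 * @head; it runs both after a successful roll forward and on error.
 */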
static void dispose_recovery_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct nilfs_recovery_block *rb
			= list_entry(head->next,
				     struct nilfs_recovery_block, list);
		list_del(&rb->list);
		kfree(rb);
	}
}

struct nilfs_segment_entry {
	struct list_head	list;
	__u64			segnum;
};

static int nilfs_segment_list_add(struct list_head *head, __u64 segnum)
{
	struct nilfs_segment_entry *ent = kmalloc(sizeof(*ent), GFP_NOFS);

	if (unlikely(!ent))
		return -ENOMEM;

	ent->segnum = segnum;
	INIT_LIST_HEAD(&ent->list);
	list_add_tail(&ent->list, head);
	return 0;
}

void nilfs_dispose_segment_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct nilfs_segment_entry *ent
			= list_entry(head->next,
				     struct nilfs_segment_entry, list);
		list_del(&ent->list);
		kfree(ent);
	}
}

static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
					       struct nilfs_sb_info *sbi,
					       struct nilfs_recovery_info *ri)
{
	struct list_head *head = &ri->ri_used_segments;
	struct nilfs_segment_entry *ent, *n;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 segnum[4];
	int err;
	int i;

	segnum[0] = nilfs->ns_segnum;
	segnum[1] = nilfs->ns_nextnum;
	segnum[2] = ri->ri_segnum;
	segnum[3] = ri->ri_nextnum;

	nilfs_attach_writer(nilfs, sbi);
	/*
	 * Releasing the next segment of the latest super root.
	 * The next segment is invalidated by this recovery.
	 */
	err = nilfs_sufile_free(sufile, segnum[1]);
	if (unlikely(err))
		goto failed;

	for (i = 1; i < 4; i++) {
		err = nilfs_segment_list_add(head, segnum[i]);
		if (unlikely(err))
			goto failed;
	}

	/*
	 * Collecting segments written after the latest super root.
	 * These are marked dirty to avoid being reallocated in the next write.
	 */
	list_for_each_entry_safe(ent, n, head, list) {
		if (ent->segnum != segnum[0]) {
			err = nilfs_sufile_scrap(sufile, ent->segnum);
			if (unlikely(err))
				goto failed;
		}
		list_del(&ent->list);
		kfree(ent);
	}

	/* Allocate new segments for recovery */
	err = nilfs_sufile_alloc(sufile, &segnum[0]);
	if (unlikely(err))
		goto failed;

	nilfs->ns_pseg_offset = 0;
	nilfs->ns_seg_seq = ri->ri_seq + 2;
	nilfs->ns_nextnum = nilfs->ns_segnum = segnum[0];

 failed:
	/* No need to recover sufile because it will be destroyed on error */
	nilfs_detach_writer(nilfs, sbi);
	return err;
}
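
/*
 * nilfs_recovery_copy_block() reads the original block contents from
 * rb->blocknr and copies them into @page at the offset of the
 * corresponding buffer.
 */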
static int nilfs_recovery_copy_block(struct nilfs_sb_info *sbi,
				     struct nilfs_recovery_block *rb,
				     struct page *page)
{
	struct buffer_head *bh_org;
	void *kaddr;

	bh_org = sb_bread(sbi->s_super, rb->blocknr);
	if (unlikely(!bh_org))
		return -EIO;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh_org);
	return 0;
}
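
/*
 * recover_dsync_blocks() replays the collected data blocks: each block is
 * copied into the page cache of the owning inode via block_write_begin()/
 * block_write_end() and the file is marked dirty so that a following
 * segment construction rewrites it.
 */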
static int recover_dsync_blocks(struct nilfs_sb_info *sbi,
				struct list_head *head,
				unsigned long *nr_salvaged_blocks)
{
	struct inode *inode;
	struct nilfs_recovery_block *rb, *n;
	unsigned blocksize = sbi->s_super->s_blocksize;
	struct page *page;
	loff_t pos;
	int err = 0, err2 = 0;

	list_for_each_entry_safe(rb, n, head, list) {
		inode = nilfs_iget(sbi->s_super, rb->ino);
		if (IS_ERR(inode)) {
			err = PTR_ERR(inode);
			inode = NULL;
			goto failed_inode;
		}

		pos = rb->blkoff << inode->i_blkbits;
		err = block_write_begin(NULL, inode->i_mapping, pos, blocksize,
					0, &page, NULL, nilfs_get_block);
		if (unlikely(err))
			goto failed_inode;

		err = nilfs_recovery_copy_block(sbi, rb, page);
		if (unlikely(err))
			goto failed_page;

		err = nilfs_set_file_dirty(sbi, inode, 1);
		if (unlikely(err))
			goto failed_page;

		block_write_end(NULL, inode->i_mapping, pos, blocksize,
				blocksize, page, NULL);

		unlock_page(page);
		page_cache_release(page);

		(*nr_salvaged_blocks)++;
		goto next;

 failed_page:
		unlock_page(page);
		page_cache_release(page);

 failed_inode:
		printk(KERN_WARNING
		       "NILFS warning: error recovering data block "
		       "(err=%d, ino=%lu, block-offset=%llu)\n",
		       err, (unsigned long)rb->ino,
		       (unsigned long long)rb->blkoff);
		if (!err2)
			err2 = err;
 next:
		iput(inode); /* iput(NULL) is just ignored */
		list_del_init(&rb->list);
		kfree(rb);
	}

	return err2;
}

/**
 * nilfs_do_roll_forward - salvage logical segments newer than the latest
 * checkpoint
 * @sbi: nilfs_sb_info
 * @nilfs: the_nilfs
 * @ri: pointer to a nilfs_recovery_info
 */
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
				 struct nilfs_sb_info *sbi,
				 struct nilfs_recovery_info *ri)
{
	struct nilfs_segsum_info ssi;
	sector_t pseg_start;
	sector_t seg_start, seg_end;  /* Starting/ending DBN of full segment */
	unsigned long nsalvaged_blocks = 0;
	u64 seg_seq;
	__u64 segnum, nextnum = 0;
	int empty_seg = 0;
	int err = 0, ret;
	LIST_HEAD(dsync_blocks);  /* list of data blocks to be recovered */
	enum {
		RF_INIT_ST,
		RF_DSYNC_ST,   /* scanning data-sync segments */
	};
	int state = RF_INIT_ST;

	nilfs_attach_writer(nilfs, sbi);
	pseg_start = ri->ri_lsegs_start;
	seg_seq = ri->ri_lsegs_start_seq;
	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);
	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

	while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {

		ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
		if (ret) {
			if (ret == NILFS_SEG_FAIL_IO) {
				err = -EIO;
				goto failed;
			}
			goto strayed;
		}
		if (unlikely(NILFS_SEG_HAS_SR(&ssi)))
			goto confused;

		/* Found a valid partial segment; do recovery actions */
		nextnum = nilfs_get_segnum_of_block(nilfs, ssi.next);
		empty_seg = 0;
		nilfs->ns_ctime = ssi.ctime;
		if (!(ssi.flags & NILFS_SS_GC))
			nilfs->ns_nongc_ctime = ssi.ctime;

		switch (state) {
		case RF_INIT_ST:
			if (!NILFS_SEG_LOGBGN(&ssi) || !NILFS_SEG_DSYNC(&ssi))
				goto try_next_pseg;
			state = RF_DSYNC_ST;
			/* Fall through */
		case RF_DSYNC_ST:
			if (!NILFS_SEG_DSYNC(&ssi))
				goto confused;

			err = collect_blocks_from_segsum(
				sbi, pseg_start, &ssi, &dsync_blocks);
			if (unlikely(err))
				goto failed;
			if (NILFS_SEG_LOGEND(&ssi)) {
				err = recover_dsync_blocks(
					sbi, &dsync_blocks, &nsalvaged_blocks);
				if (unlikely(err))
					goto failed;
				state = RF_INIT_ST;
			}
			break; /* Fall through to try_next_pseg */
		}

 try_next_pseg:
		if (pseg_start == ri->ri_lsegs_end)
			break;
		pseg_start += ssi.nblocks;
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 strayed:
		if (pseg_start == ri->ri_lsegs_end)
			break;

 feed_segment:
		/* Looking to the next full segment */
		seg_seq++;
		segnum = nextnum;
		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
		pseg_start = seg_start;
	}

	if (nsalvaged_blocks) {
		printk(KERN_INFO "NILFS (device %s): salvaged %lu blocks\n",
		       sbi->s_super->s_id, nsalvaged_blocks);
		ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
	}
 out:
	dispose_recovery_list(&dsync_blocks);
	nilfs_detach_writer(sbi->s_nilfs, sbi);
	return err;

 confused:
	err = -EINVAL;
 failed:
	printk(KERN_ERR
	       "NILFS (device %s): Error roll-forwarding "
	       "(err=%d, pseg block=%llu). ",
	       sbi->s_super->s_id, err, (unsigned long long)pseg_start);
	goto out;
}
static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
				      struct nilfs_sb_info *sbi,
				      struct nilfs_recovery_info *ri)
{
	struct buffer_head *bh;
	int err;

	if (nilfs_get_segnum_of_block(nilfs, ri->ri_lsegs_start) !=
	    nilfs_get_segnum_of_block(nilfs, ri->ri_super_root))
		return;

	bh = sb_getblk(sbi->s_super, ri->ri_lsegs_start);
	BUG_ON(!bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);
	if (unlikely(err))
		printk(KERN_WARNING
		       "NILFS warning: buffer sync write failed during "
		       "post-cleaning of recovery.\n");
	brelse(bh);
}

/**
 * nilfs_recover_logical_segments - salvage logical segments written after
 * the latest super root
 * @nilfs: the_nilfs
 * @sbi: nilfs_sb_info
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EINVAL - Inconsistent filesystem state.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_recover_logical_segments(struct the_nilfs *nilfs,
				   struct nilfs_sb_info *sbi,
				   struct nilfs_recovery_info *ri)
{
	int err;

	if (ri->ri_lsegs_start == 0 || ri->ri_lsegs_end == 0)
		return 0;

	err = nilfs_attach_checkpoint(sbi, ri->ri_cno);
	if (unlikely(err)) {
		printk(KERN_ERR
		       "NILFS: error loading the latest checkpoint.\n");
		return err;
	}

	err = nilfs_do_roll_forward(nilfs, sbi, ri);
	if (unlikely(err))
		goto failed;

	if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
		err = nilfs_prepare_segment_for_recovery(nilfs, sbi, ri);
		if (unlikely(err)) {
			printk(KERN_ERR "NILFS: Error preparing segments for "
			       "recovery.\n");
			goto failed;
		}

		err = nilfs_attach_segment_constructor(sbi);
		if (unlikely(err))
			goto failed;

		set_nilfs_discontinued(nilfs);
		err = nilfs_construct_segment(sbi->s_super);
		nilfs_detach_segment_constructor(sbi);

		if (unlikely(err)) {
			printk(KERN_ERR "NILFS: Oops! recovery failed. "
			       "(err=%d)\n", err);
			goto failed;
		}

		nilfs_finish_roll_forward(nilfs, sbi, ri);
	}

 failed:
	nilfs_detach_checkpoint(sbi);
	return err;
}

/**
 * nilfs_search_super_root - search the latest valid super root
 * @nilfs: the_nilfs
 * @sbi: nilfs_sb_info
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * nilfs_search_super_root() looks for the latest super-root from a partial
 * segment pointed by the superblock.  It sets up struct the_nilfs through
 * this search.  It fills nilfs_recovery_info (ri) required for recovery.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EINVAL - No valid segment found
 *
 * %-EIO - I/O error
 */
int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
			    struct nilfs_recovery_info *ri)
{
	struct nilfs_segsum_info ssi;
	sector_t pseg_start, pseg_end, sr_pseg_start = 0;
	sector_t seg_start, seg_end; /* range of full segment (block number) */
	sector_t b, end;
	u64 seg_seq;
	__u64 segnum, nextnum = 0;
	__u64 cno;
	LIST_HEAD(segments);
	int empty_seg = 0, scan_newer = 0;
	int ret;

	pseg_start = nilfs->ns_last_pseg;
	seg_seq = nilfs->ns_last_seq;
	cno = nilfs->ns_last_cno;
	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);

	/* Calculate range of segment */
	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

	/* Read ahead segment */
	b = seg_start;
	while (b <= seg_end)
		sb_breadahead(sbi->s_super, b++);

	for (;;) {
		/* Load segment summary */
		ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
		if (ret) {
			if (ret == NILFS_SEG_FAIL_IO)
				goto failed;
			goto strayed;
		}
		pseg_end = pseg_start + ssi.nblocks - 1;
		if (unlikely(pseg_end > seg_end)) {
			ret = NILFS_SEG_FAIL_CONSISTENCY;
			goto strayed;
		}

		/* A valid partial segment */
		ri->ri_pseg_start = pseg_start;
		ri->ri_seq = seg_seq;
		ri->ri_segnum = segnum;
		nextnum = nilfs_get_segnum_of_block(nilfs, ssi.next);
		ri->ri_nextnum = nextnum;
		empty_seg = 0;

		if (!NILFS_SEG_HAS_SR(&ssi) && !scan_newer) {
			/* This will never happen because a superblock
			   (last_segment) always points to a pseg
			   having a super root. */
			ret = NILFS_SEG_FAIL_CONSISTENCY;
			goto failed;
		}

		if (pseg_start == seg_start) {
			nilfs_get_segment_range(nilfs, nextnum, &b, &end);
			while (b <= end)
				sb_breadahead(sbi->s_super, b++);
		}
		if (!NILFS_SEG_HAS_SR(&ssi)) {
			if (!ri->ri_lsegs_start && NILFS_SEG_LOGBGN(&ssi)) {
				ri->ri_lsegs_start = pseg_start;
				ri->ri_lsegs_start_seq = seg_seq;
			}
			if (NILFS_SEG_LOGEND(&ssi))
				ri->ri_lsegs_end = pseg_start;
			goto try_next_pseg;
		}

		/* A valid super root was found. */
		ri->ri_cno = cno++;
		ri->ri_super_root = pseg_end;
		ri->ri_lsegs_start = ri->ri_lsegs_end = 0;

		nilfs_dispose_segment_list(&segments);
		nilfs->ns_pseg_offset = (sr_pseg_start = pseg_start)
			+ ssi.nblocks - seg_start;
		nilfs->ns_seg_seq = seg_seq;
		nilfs->ns_segnum = segnum;
		nilfs->ns_cno = cno;  /* nilfs->ns_cno = ri->ri_cno + 1 */
		nilfs->ns_ctime = ssi.ctime;
		nilfs->ns_nextnum = nextnum;

		if (scan_newer)
			ri->ri_need_recovery = NILFS_RECOVERY_SR_UPDATED;
		else {
			if (nilfs->ns_mount_state & NILFS_VALID_FS)
				goto super_root_found;
			scan_newer = 1;
		}

		/* reset region for roll-forward */
		pseg_start += ssi.nblocks;
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 try_next_pseg:
		/* Standing on a course, or met an inconsistent state */
		pseg_start += ssi.nblocks;
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 strayed:
		/* Off the trail */
		if (!scan_newer)
			/*
			 * This can happen if a checkpoint was written without
			 * barriers, or as a result of an I/O failure.
			 */
			goto failed;

 feed_segment:
		/* Looking to the next full segment */
		if (empty_seg++)
			goto super_root_found; /* found a valid super root */

		ret = nilfs_segment_list_add(&segments, segnum);
		if (unlikely(ret))
			goto failed;

		seg_seq++;
		segnum = nextnum;
		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
		pseg_start = seg_start;
	}

 super_root_found:
	/* Updating pointers relating to the latest checkpoint */
	list_splice_tail(&segments, &ri->ri_used_segments);
	nilfs->ns_last_pseg = sr_pseg_start;
	nilfs->ns_last_seq = nilfs->ns_seg_seq;
	nilfs->ns_last_cno = ri->ri_cno;
	return 0;

 failed:
	nilfs_dispose_segment_list(&segments);
	return (ret < 0) ? ret : nilfs_warn_segment_error(ret);
}