/*
 * recovery.c - NILFS recovery logic
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */

#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
#include "sufile.h"
#include "page.h"
#include "segbuf.h"

/*
 * Segment check result
 */
enum {
	NILFS_SEG_VALID,
	NILFS_SEG_NO_SUPER_ROOT,
	NILFS_SEG_FAIL_IO,
	NILFS_SEG_FAIL_MAGIC,
	NILFS_SEG_FAIL_SEQ,
	NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,
	NILFS_SEG_FAIL_CHECKSUM_FULL,
	NILFS_SEG_FAIL_CONSISTENCY,
};

/* work structure for recovery */
struct nilfs_recovery_block {
	ino_t ino;		/* Inode number of the file that this block
				   belongs to */
	sector_t blocknr;	/* block number */
	__u64 vblocknr;		/* virtual block number */
	unsigned long blkoff;	/* File offset of the data block (per block) */
	struct list_head list;
};

static int nilfs_warn_segment_error(int err)
{
	switch (err) {
	case NILFS_SEG_FAIL_IO:
		printk(KERN_WARNING
		       "NILFS warning: I/O error on loading last segment\n");
		return -EIO;
	case NILFS_SEG_FAIL_MAGIC:
		printk(KERN_WARNING
		       "NILFS warning: Segment magic number invalid\n");
		break;
	case NILFS_SEG_FAIL_SEQ:
		printk(KERN_WARNING
		       "NILFS warning: Sequence number mismatch\n");
		break;
	case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
		printk(KERN_WARNING
		       "NILFS warning: Checksum error in super root\n");
		break;
	case NILFS_SEG_FAIL_CHECKSUM_FULL:
		printk(KERN_WARNING
		       "NILFS warning: Checksum error in segment payload\n");
		break;
	case NILFS_SEG_FAIL_CONSISTENCY:
		printk(KERN_WARNING
		       "NILFS warning: Inconsistent segment\n");
		break;
	case NILFS_SEG_NO_SUPER_ROOT:
		printk(KERN_WARNING
		       "NILFS warning: No super root in the last segment\n");
		break;
	}
	return -EINVAL;
}

static void store_segsum_info(struct nilfs_segsum_info *ssi,
			      struct nilfs_segment_summary *sum,
			      unsigned int blocksize)
{
	ssi->flags = le16_to_cpu(sum->ss_flags);
	ssi->seg_seq = le64_to_cpu(sum->ss_seq);
	ssi->ctime = le64_to_cpu(sum->ss_create);
	ssi->next = le64_to_cpu(sum->ss_next);
	ssi->nblocks = le32_to_cpu(sum->ss_nblocks);
	ssi->nfinfo = le32_to_cpu(sum->ss_nfinfo);
	ssi->sumbytes = le32_to_cpu(sum->ss_sumbytes);

	ssi->nsumblk = DIV_ROUND_UP(ssi->sumbytes, blocksize);
	ssi->nfileblk = ssi->nblocks - ssi->nsumblk - !!NILFS_SEG_HAS_SR(ssi);
}

/**
 * calc_crc_cont - check CRC of blocks continuously
 * @sbi: nilfs_sb_info
 * @bhs: buffer head of start block
 * @sum: place to store result
 * @offset: offset bytes in the first block
 * @check_bytes: number of bytes to be checked
 * @start: DBN of start block
 * @nblock: number of blocks to be checked
 */
static int calc_crc_cont(struct nilfs_sb_info *sbi, struct buffer_head *bhs,
			 u32 *sum, unsigned long offset, u64 check_bytes,
			 sector_t start, unsigned long nblock)
{
	unsigned long blocksize = sbi->s_super->s_blocksize;
	unsigned long size;
	u32 crc;

	BUG_ON(offset >= blocksize);
	check_bytes -= offset;
	size = min_t(u64, check_bytes, blocksize - offset);
	crc = crc32_le(sbi->s_nilfs->ns_crc_seed,
		       (unsigned char *)bhs->b_data + offset, size);
	if (--nblock > 0) {
		do {
			struct buffer_head *bh
				= sb_bread(sbi->s_super, ++start);
			if (!bh)
				return -EIO;
			check_bytes -= size;
			size = min_t(u64, check_bytes, blocksize);
			crc = crc32_le(crc, bh->b_data, size);
			brelse(bh);
		} while (--nblock > 0);
	}
	*sum = crc;
	return 0;
}

/**
 * nilfs_read_super_root_block - read super root block
 * @sb: super_block
 * @sr_block: disk block number of the super root block
 * @pbh: address of a buffer_head pointer to return super root buffer
 * @check: CRC check flag
 */
int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block,
				struct buffer_head **pbh, int check)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *sr;
	u32 crc;
	int ret;

	bh_sr = sb_bread(sb, sr_block);
	if (unlikely(!bh_sr)) {
		ret = NILFS_SEG_FAIL_IO;
		goto failed;
	}

	sr = (struct nilfs_super_root *)bh_sr->b_data;
	if (check) {
		unsigned bytes = le16_to_cpu(sr->sr_bytes);

		if (bytes == 0 || bytes > sb->s_blocksize) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
		if (calc_crc_cont(NILFS_SB(sb), bh_sr, &crc,
				  sizeof(sr->sr_sum), bytes, sr_block, 1)) {
			ret = NILFS_SEG_FAIL_IO;
			goto failed_bh;
		}
		if (crc != le32_to_cpu(sr->sr_sum)) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
	}
	*pbh = bh_sr;
	return 0;

 failed_bh:
	brelse(bh_sr);

 failed:
	return nilfs_warn_segment_error(ret);
}

/**
 * load_segment_summary - read segment summary of the specified partial segment
 * @sbi: nilfs_sb_info
 * @pseg_start: start disk block number of partial segment
 * @seg_seq: sequence number requested
 * @ssi: pointer to nilfs_segsum_info struct to store information
 */
static int
load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start,
		     u64 seg_seq, struct nilfs_segsum_info *ssi)
{
	struct buffer_head *bh_sum;
	struct nilfs_segment_summary *sum;
	unsigned long nblock;
	u32 crc;
	int ret = NILFS_SEG_FAIL_IO;

	bh_sum = sb_bread(sbi->s_super, pseg_start);
	if (!bh_sum)
		goto out;

	sum = (struct nilfs_segment_summary *)bh_sum->b_data;

	/* Check consistency of segment summary */
	if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC) {
		ret = NILFS_SEG_FAIL_MAGIC;
		goto failed;
	}
	store_segsum_info(ssi, sum, sbi->s_super->s_blocksize);
	if (seg_seq != ssi->seg_seq) {
		ret = NILFS_SEG_FAIL_SEQ;
		goto failed;
	}

	nblock = ssi->nblocks;
	if (unlikely(nblock == 0 ||
		     nblock > sbi->s_nilfs->ns_blocks_per_segment)) {
		/* This limits the number of blocks read in the CRC check */
		ret = NILFS_SEG_FAIL_CONSISTENCY;
		goto failed;
	}
	if (calc_crc_cont(sbi, bh_sum, &crc, sizeof(sum->ss_datasum),
			  ((u64)nblock << sbi->s_super->s_blocksize_bits),
			  pseg_start, nblock)) {
		ret = NILFS_SEG_FAIL_IO;
		goto failed;
	}
	if (crc == le32_to_cpu(sum->ss_datasum))
		ret = 0;
	else
		ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
 failed:
	brelse(bh_sum);
 out:
	return ret;
}

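/*
 * segsum_get() returns a pointer to the next @bytes-sized item in the
 * segment summary, reading the following summary block and resetting the
 * offset when the current buffer is exhausted.  Returns NULL on read
 * failure.
 */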
static void *segsum_get(struct super_block *sb, struct buffer_head **pbh,
			unsigned int *offset, unsigned int bytes)
{
	void *ptr;
	sector_t blocknr;

	BUG_ON((*pbh)->b_size < *offset);
	if (bytes > (*pbh)->b_size - *offset) {
		blocknr = (*pbh)->b_blocknr;
		brelse(*pbh);
		*pbh = sb_bread(sb, blocknr + 1);
		if (unlikely(!*pbh))
			return NULL;
		*offset = 0;
	}
	ptr = (*pbh)->b_data + *offset;
	*offset += bytes;
	return ptr;
}

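/*
 * segsum_skip() advances the summary cursor over @count items of @bytes
 * bytes each, switching to the proper summary block when the items span
 * block boundaries.
 */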
static void segsum_skip(struct super_block *sb, struct buffer_head **pbh,
			unsigned int *offset, unsigned int bytes,
			unsigned long count)
{
	unsigned int rest_item_in_current_block
		= ((*pbh)->b_size - *offset) / bytes;

	if (count <= rest_item_in_current_block) {
		*offset += bytes * count;
	} else {
		sector_t blocknr = (*pbh)->b_blocknr;
		unsigned int nitem_per_block = (*pbh)->b_size / bytes;
		unsigned int bcnt;

		count -= rest_item_in_current_block;
		bcnt = DIV_ROUND_UP(count, nitem_per_block);
		*offset = bytes * (count - (bcnt - 1) * nitem_per_block);

		brelse(*pbh);
		*pbh = sb_bread(sb, blocknr + bcnt);
	}
}

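/*
 * collect_blocks_from_segsum() walks the finfo/binfo entries of a partial
 * segment summary and queues a nilfs_recovery_block on @head for every
 * data block that may need to be rolled forward.
 */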
static int
collect_blocks_from_segsum(struct nilfs_sb_info *sbi, sector_t sum_blocknr,
			   struct nilfs_segsum_info *ssi,
			   struct list_head *head)
{
	struct buffer_head *bh;
	unsigned int offset;
	unsigned long nfinfo = ssi->nfinfo;
	sector_t blocknr = sum_blocknr + ssi->nsumblk;
	ino_t ino;
	int err = -EIO;

	if (!nfinfo)
		return 0;

	bh = sb_bread(sbi->s_super, sum_blocknr);
	if (unlikely(!bh))
		goto out;

	offset = le16_to_cpu(
		((struct nilfs_segment_summary *)bh->b_data)->ss_bytes);
	for (;;) {
		unsigned long nblocks, ndatablk, nnodeblk;
		struct nilfs_finfo *finfo;

		finfo = segsum_get(sbi->s_super, &bh, &offset, sizeof(*finfo));
		if (unlikely(!finfo))
			goto out;

		ino = le64_to_cpu(finfo->fi_ino);
		nblocks = le32_to_cpu(finfo->fi_nblocks);
		ndatablk = le32_to_cpu(finfo->fi_ndatablk);
		nnodeblk = nblocks - ndatablk;

		while (ndatablk-- > 0) {
			struct nilfs_recovery_block *rb;
			struct nilfs_binfo_v *binfo;

			binfo = segsum_get(sbi->s_super, &bh, &offset,
					   sizeof(*binfo));
			if (unlikely(!binfo))
				goto out;

			rb = kmalloc(sizeof(*rb), GFP_NOFS);
			if (unlikely(!rb)) {
				err = -ENOMEM;
				goto out;
			}
			rb->ino = ino;
			rb->blocknr = blocknr++;
			rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr);
			rb->blkoff = le64_to_cpu(binfo->bi_blkoff);
			/* INIT_LIST_HEAD(&rb->list); */
			list_add_tail(&rb->list, head);
		}
		if (--nfinfo == 0)
			break;
		blocknr += nnodeblk; /* always 0 for the data sync segments */
		segsum_skip(sbi->s_super, &bh, &offset, sizeof(__le64),
			    nnodeblk);
	}
	err = 0;
 out:
	brelse(bh);   /* brelse(NULL) is just ignored */
	return err;
}

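/* Free all nilfs_recovery_block entries remaining on the list */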
static void dispose_recovery_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct nilfs_recovery_block *rb
			= list_entry(head->next,
				     struct nilfs_recovery_block, list);
		list_del(&rb->list);
		kfree(rb);
	}
}

struct nilfs_segment_entry {
	struct list_head	list;
	__u64			segnum;
};

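/* Add a segment number to the tail of a segment list used during recovery */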
static int nilfs_segment_list_add(struct list_head *head, __u64 segnum)
{
	struct nilfs_segment_entry *ent = kmalloc(sizeof(*ent), GFP_NOFS);

	if (unlikely(!ent))
		return -ENOMEM;

	ent->segnum = segnum;
	INIT_LIST_HEAD(&ent->list);
	list_add_tail(&ent->list, head);
	return 0;
}

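/* Free all nilfs_segment_entry items queued on a segment list */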
void nilfs_dispose_segment_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct nilfs_segment_entry *ent
			= list_entry(head->next,
				     struct nilfs_segment_entry, list);
		list_del(&ent->list);
		kfree(ent);
	}
}

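/*
 * nilfs_prepare_segment_for_recovery() frees the segment following the
 * latest super root, marks the segments written after it as dirty so they
 * are not reallocated by the next write, and allocates a fresh segment to
 * continue logging from.
 */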
static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
					      struct nilfs_sb_info *sbi,
					      struct nilfs_recovery_info *ri)
{
	struct list_head *head = &ri->ri_used_segments;
	struct nilfs_segment_entry *ent, *n;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 segnum[4];
	int err;
	int i;

	segnum[0] = nilfs->ns_segnum;
	segnum[1] = nilfs->ns_nextnum;
	segnum[2] = ri->ri_segnum;
	segnum[3] = ri->ri_nextnum;

	nilfs_attach_writer(nilfs, sbi);
	/*
	 * Releasing the next segment of the latest super root.
	 * The next segment is invalidated by this recovery.
	 */
	err = nilfs_sufile_free(sufile, segnum[1]);
	if (unlikely(err))
		goto failed;

	for (i = 1; i < 4; i++) {
		err = nilfs_segment_list_add(head, segnum[i]);
		if (unlikely(err))
			goto failed;
	}

	/*
	 * Collecting segments written after the latest super root.
	 * These are marked dirty to avoid being reallocated in the next write.
	 */
	list_for_each_entry_safe(ent, n, head, list) {
		if (ent->segnum != segnum[0]) {
			err = nilfs_sufile_scrap(sufile, ent->segnum);
			if (unlikely(err))
				goto failed;
		}
		list_del(&ent->list);
		kfree(ent);
	}

	/* Allocate new segments for recovery */
	err = nilfs_sufile_alloc(sufile, &segnum[0]);
	if (unlikely(err))
		goto failed;

	nilfs->ns_pseg_offset = 0;
	nilfs->ns_seg_seq = ri->ri_seq + 2;
	nilfs->ns_nextnum = nilfs->ns_segnum = segnum[0];

 failed:
	/* No need to recover sufile because it will be destroyed on error */
	nilfs_detach_writer(nilfs, sbi);
	return err;
}

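/* Copy the on-disk contents of a salvaged block into the page cache page */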
static int nilfs_recovery_copy_block(struct nilfs_sb_info *sbi,
				     struct nilfs_recovery_block *rb,
				     struct page *page)
{
	struct buffer_head *bh_org;
	void *kaddr;

	bh_org = sb_bread(sbi->s_super, rb->blocknr);
	if (unlikely(!bh_org))
		return -EIO;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh_org);
	return 0;
}

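/*
 * recover_dsync_blocks() re-inserts each collected data block into its
 * inode's page cache and marks the file dirty so that the salvaged blocks
 * are rewritten by the next segment construction.
 */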
static int recover_dsync_blocks(struct nilfs_sb_info *sbi,
				struct list_head *head,
				unsigned long *nr_salvaged_blocks)
{
	struct inode *inode;
	struct nilfs_recovery_block *rb, *n;
	unsigned blocksize = sbi->s_super->s_blocksize;
	struct page *page;
	loff_t pos;
	int err = 0, err2 = 0;

	list_for_each_entry_safe(rb, n, head, list) {
		inode = nilfs_iget(sbi->s_super, rb->ino);
		if (IS_ERR(inode)) {
			err = PTR_ERR(inode);
			inode = NULL;
			goto failed_inode;
		}

		pos = rb->blkoff << inode->i_blkbits;
		err = block_write_begin(NULL, inode->i_mapping, pos, blocksize,
					0, &page, NULL, nilfs_get_block);
		if (unlikely(err))
			goto failed_inode;

		err = nilfs_recovery_copy_block(sbi, rb, page);
		if (unlikely(err))
			goto failed_page;

		err = nilfs_set_file_dirty(sbi, inode, 1);
		if (unlikely(err))
			goto failed_page;

		block_write_end(NULL, inode->i_mapping, pos, blocksize,
				blocksize, page, NULL);

		unlock_page(page);
		page_cache_release(page);

		(*nr_salvaged_blocks)++;
		goto next;

 failed_page:
		unlock_page(page);
		page_cache_release(page);

 failed_inode:
		printk(KERN_WARNING
		       "NILFS warning: error recovering data block "
		       "(err=%d, ino=%lu, block-offset=%llu)\n",
		       err, (unsigned long)rb->ino,
		       (unsigned long long)rb->blkoff);
		if (!err2)
			err2 = err;
 next:
		iput(inode); /* iput(NULL) is just ignored */
		list_del_init(&rb->list);
		kfree(rb);
	}
	return err2;
}

/**
 * nilfs_do_roll_forward - salvage logical segments newer than the latest
 * checkpoint
 * @nilfs: the_nilfs
 * @sbi: nilfs_sb_info
 * @ri: pointer to a nilfs_recovery_info
 */
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
				 struct nilfs_sb_info *sbi,
				 struct nilfs_recovery_info *ri)
{
	struct nilfs_segsum_info ssi;
	sector_t pseg_start;
	sector_t seg_start, seg_end;  /* Starting/ending DBN of full segment */
	unsigned long nsalvaged_blocks = 0;
	u64 seg_seq;
	__u64 segnum, nextnum = 0;
	int empty_seg = 0;
	int err = 0, ret;
	LIST_HEAD(dsync_blocks);  /* list of data blocks to be recovered */
	enum {
		RF_INIT_ST,
		RF_DSYNC_ST,   /* scanning data-sync segments */
	};
	int state = RF_INIT_ST;

	nilfs_attach_writer(nilfs, sbi);
	pseg_start = ri->ri_lsegs_start;
	seg_seq = ri->ri_lsegs_start_seq;
	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);
	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

	while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {

		ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
		if (ret) {
			if (ret == NILFS_SEG_FAIL_IO) {
				err = -EIO;
				goto failed;
			}
			goto strayed;
		}
		if (unlikely(NILFS_SEG_HAS_SR(&ssi)))
			goto confused;

		/* Found a valid partial segment; do recovery actions */
		nextnum = nilfs_get_segnum_of_block(nilfs, ssi.next);
		empty_seg = 0;
		nilfs->ns_ctime = ssi.ctime;
		if (!(ssi.flags & NILFS_SS_GC))
			nilfs->ns_nongc_ctime = ssi.ctime;

		switch (state) {
		case RF_INIT_ST:
			if (!NILFS_SEG_LOGBGN(&ssi) || !NILFS_SEG_DSYNC(&ssi))
				goto try_next_pseg;
			state = RF_DSYNC_ST;
			/* Fall through */
		case RF_DSYNC_ST:
			if (!NILFS_SEG_DSYNC(&ssi))
				goto confused;

			err = collect_blocks_from_segsum(
				sbi, pseg_start, &ssi, &dsync_blocks);
			if (unlikely(err))
				goto failed;
			if (NILFS_SEG_LOGEND(&ssi)) {
				err = recover_dsync_blocks(
					sbi, &dsync_blocks, &nsalvaged_blocks);
				if (unlikely(err))
					goto failed;
				state = RF_INIT_ST;
			}
			break; /* Fall through to try_next_pseg */
		}

 try_next_pseg:
		if (pseg_start == ri->ri_lsegs_end)
			break;
		pseg_start += ssi.nblocks;
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 strayed:
		if (pseg_start == ri->ri_lsegs_end)
			break;

 feed_segment:
		/* Looking to the next full segment */
		if (empty_seg++)
			break;
		seg_seq++;
		segnum = nextnum;
		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
		pseg_start = seg_start;
	}

	if (nsalvaged_blocks) {
		printk(KERN_INFO "NILFS (device %s): salvaged %lu blocks\n",
		       sbi->s_super->s_id, nsalvaged_blocks);
		ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
	}
 out:
	dispose_recovery_list(&dsync_blocks);
	nilfs_detach_writer(sbi->s_nilfs, sbi);
	return err;

 confused:
	err = -EINVAL;
 failed:
	printk(KERN_ERR
	       "NILFS (device %s): Error roll-forwarding "
	       "(err=%d, pseg block=%llu). ",
	       sbi->s_super->s_id, err, (unsigned long long)pseg_start);
	goto out;
}

static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
				      struct nilfs_sb_info *sbi,
				      struct nilfs_recovery_info *ri)
{
	struct buffer_head *bh;
	int err;

	if (nilfs_get_segnum_of_block(nilfs, ri->ri_lsegs_start) !=
	    nilfs_get_segnum_of_block(nilfs, ri->ri_super_root))
		return;

	bh = sb_getblk(sbi->s_super, ri->ri_lsegs_start);
	BUG_ON(!bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);
	if (unlikely(err))
		printk(KERN_WARNING
		       "NILFS warning: buffer sync write failed during "
		       "post-cleaning of recovery.\n");
	brelse(bh);
}

/**
 * nilfs_recover_logical_segments - salvage logical segments written after
 * the latest super root
 * @nilfs: the_nilfs
 * @sbi: nilfs_sb_info
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EINVAL - Inconsistent filesystem state.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_recover_logical_segments(struct the_nilfs *nilfs,
				   struct nilfs_sb_info *sbi,
				   struct nilfs_recovery_info *ri)
{
	int err;

	if (ri->ri_lsegs_start == 0 || ri->ri_lsegs_end == 0)
		return 0;

	err = nilfs_attach_checkpoint(sbi, ri->ri_cno);
	if (unlikely(err)) {
		printk(KERN_ERR
		       "NILFS: error loading the latest checkpoint.\n");
		return err;
	}

	err = nilfs_do_roll_forward(nilfs, sbi, ri);
	if (unlikely(err))
		goto failed;

	if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
		err = nilfs_prepare_segment_for_recovery(nilfs, sbi, ri);
		if (unlikely(err)) {
			printk(KERN_ERR "NILFS: Error preparing segments for "
			       "recovery.\n");
			goto failed;
		}

		err = nilfs_attach_segment_constructor(sbi);
		if (unlikely(err))
			goto failed;

		set_nilfs_discontinued(nilfs);
		err = nilfs_construct_segment(sbi->s_super);
		nilfs_detach_segment_constructor(sbi);

		if (unlikely(err)) {
			printk(KERN_ERR "NILFS: Oops! recovery failed. "
			       "(err=%d)\n", err);
			goto failed;
		}

		nilfs_finish_roll_forward(nilfs, sbi, ri);
	}

 failed:
	nilfs_detach_checkpoint(sbi);
	return err;
}

/**
 * nilfs_search_super_root - search the latest valid super root
 * @nilfs: the_nilfs
 * @sbi: nilfs_sb_info
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * nilfs_search_super_root() looks for the latest super-root from a partial
 * segment pointed by the superblock.  It sets up struct the_nilfs through
 * this search.  It fills nilfs_recovery_info (ri) required for recovery.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EINVAL - No valid segment found
 *
 * %-EIO - I/O error
 */
int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
			    struct nilfs_recovery_info *ri)
{
	struct nilfs_segsum_info ssi;
	sector_t pseg_start, pseg_end, sr_pseg_start = 0;
	sector_t seg_start, seg_end; /* range of full segment (block number) */
	sector_t b, end;
	u64 seg_seq;
	__u64 segnum, nextnum = 0;
	__u64 cno;
	LIST_HEAD(segments);
	int empty_seg = 0, scan_newer = 0;
	int ret;

	pseg_start = nilfs->ns_last_pseg;
	seg_seq = nilfs->ns_last_seq;
	cno = nilfs->ns_last_cno;
	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);

	/* Calculate range of segment */
	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

	/* Read ahead segment */
	b = seg_start;
	while (b <= seg_end)
		sb_breadahead(sbi->s_super, b++);

	for (;;) {
		/* Load segment summary */
		ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
		if (ret) {
			if (ret == NILFS_SEG_FAIL_IO)
				goto failed;
			goto strayed;
		}
		pseg_end = pseg_start + ssi.nblocks - 1;
		if (unlikely(pseg_end > seg_end)) {
			ret = NILFS_SEG_FAIL_CONSISTENCY;
			goto strayed;
		}

		/* A valid partial segment */
		ri->ri_pseg_start = pseg_start;
		ri->ri_seq = seg_seq;
		ri->ri_segnum = segnum;
		nextnum = nilfs_get_segnum_of_block(nilfs, ssi.next);
		ri->ri_nextnum = nextnum;
		empty_seg = 0;

		if (!NILFS_SEG_HAS_SR(&ssi) && !scan_newer) {
			/* This will never happen because a superblock
			   (last_segment) always points to a pseg
			   having a super root. */
			ret = NILFS_SEG_FAIL_CONSISTENCY;
			goto failed;
		}

		if (pseg_start == seg_start) {
			nilfs_get_segment_range(nilfs, nextnum, &b, &end);
			while (b <= end)
				sb_breadahead(sbi->s_super, b++);
		}
		if (!NILFS_SEG_HAS_SR(&ssi)) {
			if (!ri->ri_lsegs_start && NILFS_SEG_LOGBGN(&ssi)) {
				ri->ri_lsegs_start = pseg_start;
				ri->ri_lsegs_start_seq = seg_seq;
			}
			if (NILFS_SEG_LOGEND(&ssi))
				ri->ri_lsegs_end = pseg_start;
			goto try_next_pseg;
		}

		/* A valid super root was found. */
		ri->ri_cno = cno++;
		ri->ri_super_root = pseg_end;
		ri->ri_lsegs_start = ri->ri_lsegs_end = 0;

		nilfs_dispose_segment_list(&segments);
		nilfs->ns_pseg_offset = (sr_pseg_start = pseg_start)
			+ ssi.nblocks - seg_start;
		nilfs->ns_seg_seq = seg_seq;
		nilfs->ns_segnum = segnum;
		nilfs->ns_cno = cno;  /* nilfs->ns_cno = ri->ri_cno + 1 */
		nilfs->ns_ctime = ssi.ctime;
		nilfs->ns_nextnum = nextnum;

		if (scan_newer)
			ri->ri_need_recovery = NILFS_RECOVERY_SR_UPDATED;
		else {
			if (nilfs->ns_mount_state & NILFS_VALID_FS)
				goto super_root_found;
			scan_newer = 1;
		}

		/* reset region for roll-forward */
		pseg_start += ssi.nblocks;
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 try_next_pseg:
		/* Standing on a course, or met an inconsistent state */
		pseg_start += ssi.nblocks;
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 strayed:
		if (!scan_newer)
			/*
			 * This can happen if a checkpoint was written without
			 * barriers, or as a result of an I/O failure.
			 */
			goto failed;

 feed_segment:
		/* Looking to the next full segment */
		if (empty_seg++)
			goto super_root_found; /* found a valid super root */

		ret = nilfs_segment_list_add(&segments, segnum);
		if (unlikely(ret))
			goto failed;

		seg_seq++;
		segnum = nextnum;
		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
		pseg_start = seg_start;
	}

 super_root_found:
	/* Updating pointers relating to the latest checkpoint */
	list_splice_tail(&segments, &ri->ri_used_segments);
	nilfs->ns_last_pseg = sr_pseg_start;
	nilfs->ns_last_seq = nilfs->ns_seg_seq;
	nilfs->ns_last_cno = ri->ri_cno;
	return 0;

 failed:
	nilfs_dispose_segment_list(&segments);
	return (ret < 0) ? ret : nilfs_warn_segment_error(ret);
}