/*
 * recovery.c - NILFS recovery logic
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */

#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
#include "sufile.h"
#include "page.h"
#include "segbuf.h"

/*
 * Segment check result
 */
enum {
        NILFS_SEG_VALID,
        NILFS_SEG_NO_SUPER_ROOT,
        NILFS_SEG_FAIL_IO,
        NILFS_SEG_FAIL_MAGIC,
        NILFS_SEG_FAIL_SEQ,
        NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,
        NILFS_SEG_FAIL_CHECKSUM_FULL,
        NILFS_SEG_FAIL_CONSISTENCY,
};

/* work structure for recovery */
struct nilfs_recovery_block {
        ino_t ino;              /* Inode number of the file that this block
                                   belongs to */
        sector_t blocknr;       /* block number */
        __u64 vblocknr;         /* virtual block number */
        unsigned long blkoff;   /* File offset of the data block (per block) */
        struct list_head list;
};

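/*
 * nilfs_warn_segment_error - log the result of a failed segment check and
 * convert it to a negative error code (-EIO for the I/O failure case,
 * -EINVAL for every other inconsistency).
 */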
static int nilfs_warn_segment_error(int err)
{
        switch (err) {
        case NILFS_SEG_FAIL_IO:
                printk(KERN_WARNING
                       "NILFS warning: I/O error on loading last segment\n");
                return -EIO;
        case NILFS_SEG_FAIL_MAGIC:
                printk(KERN_WARNING
                       "NILFS warning: Segment magic number invalid\n");
                break;
        case NILFS_SEG_FAIL_SEQ:
                printk(KERN_WARNING
                       "NILFS warning: Sequence number mismatch\n");
                break;
        case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
                printk(KERN_WARNING
                       "NILFS warning: Checksum error in super root\n");
                break;
        case NILFS_SEG_FAIL_CHECKSUM_FULL:
                printk(KERN_WARNING
                       "NILFS warning: Checksum error in segment payload\n");
                break;
        case NILFS_SEG_FAIL_CONSISTENCY:
                printk(KERN_WARNING
                       "NILFS warning: Inconsistent segment\n");
                break;
        case NILFS_SEG_NO_SUPER_ROOT:
                printk(KERN_WARNING
                       "NILFS warning: No super root in the last segment\n");
                break;
        }
        return -EINVAL;
}

static void store_segsum_info(struct nilfs_segsum_info *ssi,
                              struct nilfs_segment_summary *sum,
                              unsigned int blocksize)
{
        ssi->flags = le16_to_cpu(sum->ss_flags);
        ssi->seg_seq = le64_to_cpu(sum->ss_seq);
        ssi->ctime = le64_to_cpu(sum->ss_create);
        ssi->next = le64_to_cpu(sum->ss_next);
        ssi->nblocks = le32_to_cpu(sum->ss_nblocks);
        ssi->nfinfo = le32_to_cpu(sum->ss_nfinfo);
        ssi->sumbytes = le32_to_cpu(sum->ss_sumbytes);

        ssi->nsumblk = DIV_ROUND_UP(ssi->sumbytes, blocksize);
        ssi->nfileblk = ssi->nblocks - ssi->nsumblk - !!NILFS_SEG_HAS_SR(ssi);
}

/**
 * calc_crc_cont - check CRC of blocks continuously
 * @sbi: nilfs_sb_info
 * @bhs: buffer head of start block
 * @sum: place to store result
 * @offset: offset bytes in the first block
 * @check_bytes: number of bytes to be checked
 * @start: DBN of start block
 * @nblock: number of blocks to be checked
 */
static int calc_crc_cont(struct nilfs_sb_info *sbi, struct buffer_head *bhs,
                         u32 *sum, unsigned long offset, u64 check_bytes,
                         sector_t start, unsigned long nblock)
{
        unsigned long blocksize = sbi->s_super->s_blocksize;
        unsigned long size;
        u32 crc;

        BUG_ON(offset >= blocksize);
        check_bytes -= offset;
        size = min_t(u64, check_bytes, blocksize - offset);
        crc = crc32_le(sbi->s_nilfs->ns_crc_seed,
                       (unsigned char *)bhs->b_data + offset, size);
        if (--nblock > 0) {
                do {
                        struct buffer_head *bh
                                = sb_bread(sbi->s_super, ++start);
                        if (!bh)
                                return -EIO;
                        check_bytes -= size;
                        size = min_t(u64, check_bytes, blocksize);
                        crc = crc32_le(crc, bh->b_data, size);
                        brelse(bh);
                } while (--nblock > 0);
        }
        *sum = crc;
        return 0;
}

/**
 * nilfs_read_super_root_block - read super root block
 * @sb: super_block
 * @sr_block: disk block number of the super root block
 * @pbh: address of a buffer_head pointer to return super root buffer
 * @check: CRC check flag
 */
int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block,
                                struct buffer_head **pbh, int check)
{
        struct buffer_head *bh_sr;
        struct nilfs_super_root *sr;
        u32 crc;
        int ret;

        *pbh = NULL;
        bh_sr = sb_bread(sb, sr_block);
        if (unlikely(!bh_sr)) {
                ret = NILFS_SEG_FAIL_IO;
                goto failed;
        }

        sr = (struct nilfs_super_root *)bh_sr->b_data;
        if (check) {
                unsigned bytes = le16_to_cpu(sr->sr_bytes);

                if (bytes == 0 || bytes > sb->s_blocksize) {
                        ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
                        goto failed_bh;
                }
                if (calc_crc_cont(NILFS_SB(sb), bh_sr, &crc,
                                  sizeof(sr->sr_sum), bytes, sr_block, 1)) {
                        ret = NILFS_SEG_FAIL_IO;
                        goto failed_bh;
                }
                if (crc != le32_to_cpu(sr->sr_sum)) {
                        ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
                        goto failed_bh;
                }
        }
        *pbh = bh_sr;
        return 0;

 failed_bh:
        brelse(bh_sr);

 failed:
        return nilfs_warn_segment_error(ret);
}

/**
 * load_segment_summary - read segment summary of the specified partial segment
 * @sbi: nilfs_sb_info
 * @pseg_start: start disk block number of partial segment
 * @seg_seq: sequence number requested
 * @ssi: pointer to nilfs_segsum_info struct to store information
 */
static int
load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start,
                     u64 seg_seq, struct nilfs_segsum_info *ssi)
{
        struct buffer_head *bh_sum;
        struct nilfs_segment_summary *sum;
        unsigned long nblock;
        u32 crc;
        int ret = NILFS_SEG_FAIL_IO;

        bh_sum = sb_bread(sbi->s_super, pseg_start);
        if (!bh_sum)
                goto out;

        sum = (struct nilfs_segment_summary *)bh_sum->b_data;

        /* Check consistency of segment summary */
        if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC) {
                ret = NILFS_SEG_FAIL_MAGIC;
                goto failed;
        }
        store_segsum_info(ssi, sum, sbi->s_super->s_blocksize);
        if (seg_seq != ssi->seg_seq) {
                ret = NILFS_SEG_FAIL_SEQ;
                goto failed;
        }

        nblock = ssi->nblocks;
        if (unlikely(nblock == 0 ||
                     nblock > sbi->s_nilfs->ns_blocks_per_segment)) {
                /* This limits the number of blocks read in the CRC check */
                ret = NILFS_SEG_FAIL_CONSISTENCY;
                goto failed;
        }
        if (calc_crc_cont(sbi, bh_sum, &crc, sizeof(sum->ss_datasum),
                          ((u64)nblock << sbi->s_super->s_blocksize_bits),
                          pseg_start, nblock)) {
                ret = NILFS_SEG_FAIL_IO;
                goto failed;
        }
        if (crc == le32_to_cpu(sum->ss_datasum))
                ret = 0;
        else
                ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
 failed:
        brelse(bh_sum);
 out:
        return ret;
}

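/*
 * segsum_get - return a pointer to @bytes of summary data at *@offset in the
 * block referenced by *@pbh.  If the item does not fit in the remainder of
 * the current summary block, the next block is read and *@offset restarts
 * at its beginning.  On return *@offset points just past the item; NULL is
 * returned if reading the next summary block fails.
 */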
static void *segsum_get(struct super_block *sb, struct buffer_head **pbh,
                        unsigned int *offset, unsigned int bytes)
{
        void *ptr;
        sector_t blocknr;

        BUG_ON((*pbh)->b_size < *offset);
        if (bytes > (*pbh)->b_size - *offset) {
                blocknr = (*pbh)->b_blocknr;
                brelse(*pbh);
                *pbh = sb_bread(sb, blocknr + 1);
                if (unlikely(!*pbh))
                        return NULL;
                *offset = 0;
        }
        ptr = (*pbh)->b_data + *offset;
        *offset += bytes;
        return ptr;
}

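/*
 * segsum_skip - skip @count summary items of @bytes each without reading
 * them.  With rest_item_in_current_block items left in the current block
 * and nitem_per_block items fitting in a full block, skipping beyond the
 * current block reads ahead bcnt = DIV_ROUND_UP(count - rest, nitem_per_block)
 * blocks and leaves *@offset just past the last skipped item in the newly
 * read block.
 */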
static void segsum_skip(struct super_block *sb, struct buffer_head **pbh,
                        unsigned int *offset, unsigned int bytes,
                        unsigned long count)
{
        unsigned int rest_item_in_current_block
                = ((*pbh)->b_size - *offset) / bytes;

        if (count <= rest_item_in_current_block) {
                *offset += bytes * count;
        } else {
                sector_t blocknr = (*pbh)->b_blocknr;
                unsigned int nitem_per_block = (*pbh)->b_size / bytes;
                unsigned int bcnt;

                count -= rest_item_in_current_block;
                bcnt = DIV_ROUND_UP(count, nitem_per_block);
                *offset = bytes * (count - (bcnt - 1) * nitem_per_block);

                brelse(*pbh);
                *pbh = sb_bread(sb, blocknr + bcnt);
        }
}

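/*
 * collect_blocks_from_segsum - walk the finfo/binfo_v entries of a data-sync
 * log whose summary starts at @sum_blocknr and queue one
 * struct nilfs_recovery_block per data block on @head.  Node (B-tree) block
 * entries are skipped; for data-sync logs their count is always zero.
 */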
static int
collect_blocks_from_segsum(struct nilfs_sb_info *sbi, sector_t sum_blocknr,
                           struct nilfs_segsum_info *ssi,
                           struct list_head *head)
{
        struct buffer_head *bh;
        unsigned int offset;
        unsigned long nfinfo = ssi->nfinfo;
        sector_t blocknr = sum_blocknr + ssi->nsumblk;
        ino_t ino;
        int err = -EIO;

        if (!nfinfo)
                return 0;

        bh = sb_bread(sbi->s_super, sum_blocknr);
        if (unlikely(!bh))
                goto out;

        offset = le16_to_cpu(
                ((struct nilfs_segment_summary *)bh->b_data)->ss_bytes);
        for (;;) {
                unsigned long nblocks, ndatablk, nnodeblk;
                struct nilfs_finfo *finfo;

                finfo = segsum_get(sbi->s_super, &bh, &offset, sizeof(*finfo));
                if (unlikely(!finfo))
                        goto out;

                ino = le64_to_cpu(finfo->fi_ino);
                nblocks = le32_to_cpu(finfo->fi_nblocks);
                ndatablk = le32_to_cpu(finfo->fi_ndatablk);
                nnodeblk = nblocks - ndatablk;

                while (ndatablk-- > 0) {
                        struct nilfs_recovery_block *rb;
                        struct nilfs_binfo_v *binfo;

                        binfo = segsum_get(sbi->s_super, &bh, &offset,
                                           sizeof(*binfo));
                        if (unlikely(!binfo))
                                goto out;

                        rb = kmalloc(sizeof(*rb), GFP_NOFS);
                        if (unlikely(!rb)) {
                                err = -ENOMEM;
                                goto out;
                        }
                        rb->ino = ino;
                        rb->blocknr = blocknr++;
                        rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr);
                        rb->blkoff = le64_to_cpu(binfo->bi_blkoff);
                        /* INIT_LIST_HEAD(&rb->list); */
                        list_add_tail(&rb->list, head);
                }
                if (--nfinfo == 0)
                        break;
                blocknr += nnodeblk; /* always 0 for the data sync segments */
                segsum_skip(sbi->s_super, &bh, &offset, sizeof(__le64),
                            nnodeblk);
                if (unlikely(!bh))
                        goto out;
        }
        err = 0;
 out:
        brelse(bh);     /* brelse(NULL) is just ignored */
        return err;
}

static void dispose_recovery_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct nilfs_recovery_block *rb
                        = list_entry(head->next,
                                     struct nilfs_recovery_block, list);
                list_del(&rb->list);
                kfree(rb);
        }
}

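/*
 * struct nilfs_segment_entry - list node used to remember the segment
 * numbers visited while scanning for the latest super root; the entries
 * either end up on ri->ri_used_segments or are released with
 * nilfs_dispose_segment_list().
 */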
struct nilfs_segment_entry {
        struct list_head        list;
        __u64                   segnum;
};

static int nilfs_segment_list_add(struct list_head *head, __u64 segnum)
{
        struct nilfs_segment_entry *ent = kmalloc(sizeof(*ent), GFP_NOFS);

        if (unlikely(!ent))
                return -ENOMEM;

        ent->segnum = segnum;
        INIT_LIST_HEAD(&ent->list);
        list_add_tail(&ent->list, head);
        return 0;
}

void nilfs_dispose_segment_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct nilfs_segment_entry *ent
                        = list_entry(head->next,
                                     struct nilfs_segment_entry, list);
                list_del(&ent->list);
                kfree(ent);
        }
}

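/*
 * nilfs_prepare_segment_for_recovery - prepare segment usage state before
 * the salvaged blocks are written back.  The segment that would have been
 * written next is freed, the segments written after the latest super root
 * are scrapped in the sufile so they are not reallocated by the next write,
 * and a fresh segment is allocated as the new write position.
 */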
static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
                                              struct nilfs_sb_info *sbi,
                                              struct nilfs_recovery_info *ri)
{
        struct list_head *head = &ri->ri_used_segments;
        struct nilfs_segment_entry *ent, *n;
        struct inode *sufile = nilfs->ns_sufile;
        __u64 segnum[4];
        int err;
        int i;

        segnum[0] = nilfs->ns_segnum;
        segnum[1] = nilfs->ns_nextnum;
        segnum[2] = ri->ri_segnum;
        segnum[3] = ri->ri_nextnum;

        nilfs_attach_writer(nilfs, sbi);
        /*
         * Releasing the next segment of the latest super root.
         * The next segment is invalidated by this recovery.
         */
        err = nilfs_sufile_free(sufile, segnum[1]);
        if (unlikely(err))
                goto failed;

        for (i = 1; i < 4; i++) {
                err = nilfs_segment_list_add(head, segnum[i]);
                if (unlikely(err))
                        goto failed;
        }

        /*
         * Collecting segments written after the latest super root.
         * These are marked dirty to avoid being reallocated in the next write.
         */
        list_for_each_entry_safe(ent, n, head, list) {
                if (ent->segnum != segnum[0]) {
                        err = nilfs_sufile_scrap(sufile, ent->segnum);
                        if (unlikely(err))
                                goto failed;
                }
                list_del(&ent->list);
                kfree(ent);
        }

        /* Allocate new segments for recovery */
        err = nilfs_sufile_alloc(sufile, &segnum[0]);
        if (unlikely(err))
                goto failed;

        nilfs->ns_pseg_offset = 0;
        nilfs->ns_seg_seq = ri->ri_seq + 2;
        nilfs->ns_nextnum = nilfs->ns_segnum = segnum[0];

 failed:
        /* No need to recover sufile because it will be destroyed on error */
        nilfs_detach_writer(nilfs, sbi);
        return err;
}

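/*
 * nilfs_recovery_copy_block - read the salvaged block from its old on-disk
 * location and copy its contents into @page at the position prepared by
 * block_write_begin() in recover_dsync_blocks().
 */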
static int nilfs_recovery_copy_block(struct nilfs_sb_info *sbi,
                                     struct nilfs_recovery_block *rb,
                                     struct page *page)
{
        struct buffer_head *bh_org;
        void *kaddr;

        bh_org = sb_bread(sbi->s_super, rb->blocknr);
        if (unlikely(!bh_org))
                return -EIO;

        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
        kunmap_atomic(kaddr, KM_USER0);
        brelse(bh_org);
        return 0;
}

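/*
 * recover_dsync_blocks - replay the queued data-sync blocks through the page
 * cache of each target inode and mark the files dirty so that the segment
 * constructor writes them out again.  Per-block failures are logged and
 * remembered in err2, but the loop continues so that as many blocks as
 * possible are salvaged.
 */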
static int recover_dsync_blocks(struct nilfs_sb_info *sbi,
                                struct list_head *head,
                                unsigned long *nr_salvaged_blocks)
{
        struct inode *inode;
        struct nilfs_recovery_block *rb, *n;
        unsigned blocksize = sbi->s_super->s_blocksize;
        struct page *page;
        loff_t pos;
        int err = 0, err2 = 0;

        list_for_each_entry_safe(rb, n, head, list) {
                inode = nilfs_iget(sbi->s_super, rb->ino);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        inode = NULL;
                        goto failed_inode;
                }

                pos = rb->blkoff << inode->i_blkbits;
                err = block_write_begin(NULL, inode->i_mapping, pos, blocksize,
                                        0, &page, NULL, nilfs_get_block);
                if (unlikely(err))
                        goto failed_inode;

                err = nilfs_recovery_copy_block(sbi, rb, page);
                if (unlikely(err))
                        goto failed_page;

                err = nilfs_set_file_dirty(sbi, inode, 1);
                if (unlikely(err))
                        goto failed_page;

                block_write_end(NULL, inode->i_mapping, pos, blocksize,
                                blocksize, page, NULL);

                unlock_page(page);
                page_cache_release(page);

                (*nr_salvaged_blocks)++;
                goto next;

 failed_page:
                unlock_page(page);
                page_cache_release(page);

 failed_inode:
                printk(KERN_WARNING
                       "NILFS warning: error recovering data block "
                       "(err=%d, ino=%lu, block-offset=%llu)\n",
                       err, (unsigned long)rb->ino,
                       (unsigned long long)rb->blkoff);
                if (!err2)
                        err2 = err;
 next:
                iput(inode); /* iput(NULL) is just ignored */
                list_del_init(&rb->list);
                kfree(rb);
        }
        return err2;
}

/**
 * nilfs_do_roll_forward - salvage logical segments newer than the latest
 * checkpoint
 * @nilfs: the_nilfs
 * @sbi: nilfs_sb_info
 * @ri: pointer to a nilfs_recovery_info
 */
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
                                 struct nilfs_sb_info *sbi,
                                 struct nilfs_recovery_info *ri)
{
        struct nilfs_segsum_info ssi;
        sector_t pseg_start;
        sector_t seg_start, seg_end;  /* Starting/ending DBN of full segment */
        unsigned long nsalvaged_blocks = 0;
        u64 seg_seq;
        __u64 segnum, nextnum = 0;
        int empty_seg = 0;
        int err = 0, ret;
        LIST_HEAD(dsync_blocks);  /* list of data blocks to be recovered */
        enum {
                RF_INIT_ST,
                RF_DSYNC_ST,   /* scanning data-sync segments */
        };
        int state = RF_INIT_ST;

        nilfs_attach_writer(nilfs, sbi);
        pseg_start = ri->ri_lsegs_start;
        seg_seq = ri->ri_lsegs_start_seq;
        segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);
        nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

        while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {

                ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
                if (ret) {
                        if (ret == NILFS_SEG_FAIL_IO) {
                                err = -EIO;
                                goto failed;
                        }
                        goto strayed;
                }
                if (unlikely(NILFS_SEG_HAS_SR(&ssi)))
                        goto confused;

                /* Found a valid partial segment; do recovery actions */
                nextnum = nilfs_get_segnum_of_block(nilfs, ssi.next);
                empty_seg = 0;
                nilfs->ns_ctime = ssi.ctime;
                if (!(ssi.flags & NILFS_SS_GC))
                        nilfs->ns_nongc_ctime = ssi.ctime;

                switch (state) {
                case RF_INIT_ST:
                        if (!NILFS_SEG_LOGBGN(&ssi) || !NILFS_SEG_DSYNC(&ssi))
                                goto try_next_pseg;
                        state = RF_DSYNC_ST;
                        /* Fall through */
                case RF_DSYNC_ST:
                        if (!NILFS_SEG_DSYNC(&ssi))
                                goto confused;

                        err = collect_blocks_from_segsum(
                                sbi, pseg_start, &ssi, &dsync_blocks);
                        if (unlikely(err))
                                goto failed;
                        if (NILFS_SEG_LOGEND(&ssi)) {
                                err = recover_dsync_blocks(
                                        sbi, &dsync_blocks,
                                        &nsalvaged_blocks);
                                if (unlikely(err))
                                        goto failed;
                                state = RF_INIT_ST;
                        }
                        break; /* Fall through to try_next_pseg */
                }

 try_next_pseg:
                if (pseg_start == ri->ri_lsegs_end)
                        break;
                pseg_start += ssi.nblocks;
                if (pseg_start < seg_end)
                        continue;
                goto feed_segment;

 strayed:
                if (pseg_start == ri->ri_lsegs_end)
                        break;

 feed_segment:
                /* Looking to the next full segment */
                if (empty_seg++)
                        break;
                seg_seq++;
                segnum = nextnum;
                nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
                pseg_start = seg_start;
        }

        if (nsalvaged_blocks) {
                printk(KERN_INFO "NILFS (device %s): salvaged %lu blocks\n",
                       sbi->s_super->s_id, nsalvaged_blocks);
                ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
        }
 out:
        dispose_recovery_list(&dsync_blocks);
        nilfs_detach_writer(sbi->s_nilfs, sbi);
        return err;

 confused:
        err = -EINVAL;
 failed:
        printk(KERN_ERR
               "NILFS (device %s): Error roll-forwarding "
               "(err=%d, pseg block=%llu). ",
               sbi->s_super->s_id, err, (unsigned long long)pseg_start);
        goto out;
}

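/*
 * nilfs_finish_roll_forward - clean up after a successful roll forward.
 * When the first salvaged log lies in the same full segment as the latest
 * super root (so that segment stays in use and cannot be scrapped), its
 * first block is zeroed and written back so the stale log head is not
 * picked up again on a later mount.
 */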
static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
                                      struct nilfs_sb_info *sbi,
                                      struct nilfs_recovery_info *ri)
{
        struct buffer_head *bh;
        int err;

        if (nilfs_get_segnum_of_block(nilfs, ri->ri_lsegs_start) !=
            nilfs_get_segnum_of_block(nilfs, ri->ri_super_root))
                return;

        bh = sb_getblk(sbi->s_super, ri->ri_lsegs_start);
        BUG_ON(!bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_dirty(bh);
        err = sync_dirty_buffer(bh);
        if (unlikely(err))
                printk(KERN_WARNING
                       "NILFS warning: buffer sync write failed during "
                       "post-cleaning of recovery.\n");
        brelse(bh);
}

/**
 * nilfs_recover_logical_segments - salvage logical segments written after
 * the latest super root
 * @nilfs: the_nilfs
 * @sbi: nilfs_sb_info
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EINVAL - Inconsistent filesystem state.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_recover_logical_segments(struct the_nilfs *nilfs,
                                   struct nilfs_sb_info *sbi,
                                   struct nilfs_recovery_info *ri)
{
        int err;

        if (ri->ri_lsegs_start == 0 || ri->ri_lsegs_end == 0)
                return 0;

        err = nilfs_attach_checkpoint(sbi, ri->ri_cno);
        if (unlikely(err)) {
                printk(KERN_ERR
                       "NILFS: error loading the latest checkpoint.\n");
                return err;
        }

        err = nilfs_do_roll_forward(nilfs, sbi, ri);
        if (unlikely(err))
                goto failed;

        if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
                err = nilfs_prepare_segment_for_recovery(nilfs, sbi, ri);
                if (unlikely(err)) {
                        printk(KERN_ERR "NILFS: Error preparing segments for "
                               "recovery.\n");
                        goto failed;
                }

                err = nilfs_attach_segment_constructor(sbi);
                if (unlikely(err))
                        goto failed;

                set_nilfs_discontinued(nilfs);
                err = nilfs_construct_segment(sbi->s_super);
                nilfs_detach_segment_constructor(sbi);

                if (unlikely(err)) {
                        printk(KERN_ERR "NILFS: Oops! recovery failed. "
                               "(err=%d)\n", err);
                        goto failed;
                }

                nilfs_finish_roll_forward(nilfs, sbi, ri);
        }

 failed:
        nilfs_detach_checkpoint(sbi);
        return err;
}

/**
 * nilfs_search_super_root - search the latest valid super root
 * @nilfs: the_nilfs
 * @sbi: nilfs_sb_info
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * nilfs_search_super_root() looks for the latest super-root from a partial
 * segment pointed to by the superblock.  It sets up struct the_nilfs through
 * this search.  It fills nilfs_recovery_info (ri) required for recovery.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EINVAL - No valid segment found
 *
 * %-EIO - I/O error
 */
int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
                            struct nilfs_recovery_info *ri)
{
        struct nilfs_segsum_info ssi;
        sector_t pseg_start, pseg_end, sr_pseg_start = 0;
        sector_t seg_start, seg_end; /* range of full segment (block number) */
        sector_t b, end;
        u64 seg_seq;
        __u64 segnum, nextnum = 0;
        __u64 cno;
        LIST_HEAD(segments);
        int empty_seg = 0, scan_newer = 0;
        int ret;

        pseg_start = nilfs->ns_last_pseg;
        seg_seq = nilfs->ns_last_seq;
        cno = nilfs->ns_last_cno;
        segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);

        /* Calculate range of segment */
        nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

        /* Read ahead segment */
        b = seg_start;
        while (b <= seg_end)
                sb_breadahead(sbi->s_super, b++);

        for (;;) {
                /* Load segment summary */
                ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
                if (ret) {
                        if (ret == NILFS_SEG_FAIL_IO)
                                goto failed;
                        goto strayed;
                }
                pseg_end = pseg_start + ssi.nblocks - 1;
                if (unlikely(pseg_end > seg_end)) {
                        ret = NILFS_SEG_FAIL_CONSISTENCY;
                        goto strayed;
                }

                /* A valid partial segment */
                ri->ri_pseg_start = pseg_start;
                ri->ri_seq = seg_seq;
                ri->ri_segnum = segnum;
                nextnum = nilfs_get_segnum_of_block(nilfs, ssi.next);
                ri->ri_nextnum = nextnum;
                empty_seg = 0;

                if (!NILFS_SEG_HAS_SR(&ssi) && !scan_newer) {
                        /* This will never happen because a superblock
                           (last_segment) always points to a pseg
                           having a super root. */
                        ret = NILFS_SEG_FAIL_CONSISTENCY;
                        goto failed;
                }

                if (pseg_start == seg_start) {
                        nilfs_get_segment_range(nilfs, nextnum, &b, &end);
                        while (b <= end)
                                sb_breadahead(sbi->s_super, b++);
                }
                if (!NILFS_SEG_HAS_SR(&ssi)) {
                        if (!ri->ri_lsegs_start && NILFS_SEG_LOGBGN(&ssi)) {
                                ri->ri_lsegs_start = pseg_start;
                                ri->ri_lsegs_start_seq = seg_seq;
                        }
                        if (NILFS_SEG_LOGEND(&ssi))
                                ri->ri_lsegs_end = pseg_start;
                        goto try_next_pseg;
                }

                /* A valid super root was found. */
                ri->ri_cno = cno++;
                ri->ri_super_root = pseg_end;
                ri->ri_lsegs_start = ri->ri_lsegs_end = 0;

                nilfs_dispose_segment_list(&segments);
                nilfs->ns_pseg_offset = (sr_pseg_start = pseg_start)
                        + ssi.nblocks - seg_start;
                nilfs->ns_seg_seq = seg_seq;
                nilfs->ns_segnum = segnum;
                nilfs->ns_cno = cno;  /* nilfs->ns_cno = ri->ri_cno + 1 */
                nilfs->ns_ctime = ssi.ctime;
                nilfs->ns_nextnum = nextnum;

                if (scan_newer)
                        ri->ri_need_recovery = NILFS_RECOVERY_SR_UPDATED;
                else {
                        if (nilfs->ns_mount_state & NILFS_VALID_FS)
                                goto super_root_found;
                        scan_newer = 1;
                }

                /* reset region for roll-forward */
                pseg_start += ssi.nblocks;
                if (pseg_start < seg_end)
                        continue;
                goto feed_segment;

 try_next_pseg:
                /* Standing on a course, or met an inconsistent state */
                pseg_start += ssi.nblocks;
                if (pseg_start < seg_end)
                        continue;
                goto feed_segment;

 strayed:
                /* Off the trail */
                if (!scan_newer)
                        /*
                         * This can happen if a checkpoint was written without
                         * barriers, or as a result of an I/O failure.
                         */
                        goto failed;

 feed_segment:
                /* Looking to the next full segment */
                if (empty_seg++)
                        goto super_root_found; /* found a valid super root */

                ret = nilfs_segment_list_add(&segments, segnum);
                if (unlikely(ret))
                        goto failed;

                seg_seq++;
                segnum = nextnum;
                nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
                pseg_start = seg_start;
        }

 super_root_found:
        /* Updating pointers relating to the latest checkpoint */
        list_splice_tail(&segments, &ri->ri_used_segments);
        nilfs->ns_last_pseg = sr_pseg_start;
        nilfs->ns_last_seq = nilfs->ns_seg_seq;
        nilfs->ns_last_cno = ri->ri_cno;
        return 0;

 failed:
        nilfs_dispose_segment_list(&segments);
        return (ret < 0) ? ret : nilfs_warn_segment_error(ret);
}