/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#define SC_N_INODEVEC	16   /* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64   /* Upper limit of the number of segments
				appended in collection retry loop */
/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/* Flush data blocks of a given file and make
			   a logical segment without a super root */
	SC_FLUSH_FILE,	/* Flush data files, leads to segment writes without
			   creating a checkpoint */
	SC_FLUSH_DAT,	/* Flush DAT file.  This also creates segments without
			   a checkpoint */
};
/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,		/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,		/* Super root */
	NILFS_ST_DSYNC,		/* Data sync blocks */
	NILFS_ST_DONE,
};
/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* Segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)
/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct nilfs_sb_info *, struct list_head *,
			       int);
#define nilfs_cnt32_gt(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) && \
	 ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) && \
	 ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)	nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)	nilfs_cnt32_ge(b, a)
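/*
 * Illustrative note (not part of the original source): the macros above
 * compare 32-bit sequence counters in a wraparound-safe way, in the same
 * spirit as time_after()/time_before() for jiffies.  A minimal sketch of
 * why the signed subtraction matters:
 *
 *	__u32 a = 0xfffffffeU;	// counter just before wrapping
 *	__u32 b = 0x00000001U;	// counter shortly after wrapping
 *
 *	// Plain "a < b" is false although b is logically ahead of a.
 *	// nilfs_cnt32_lt(a, b) evaluates ((__s32)(a) - (__s32)(b) < 0),
 *	// i.e. (-2 - 1 < 0), which is true as intended.
 *
 * nilfs_segctor_wakeup() relies on this when it compares sc_seq_done with
 * the sequence numbers of queued requests.
 */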
static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;
		else {
			/*
			 * If journal_info field is occupied by other FS,
			 * it is saved and will be restored on
			 * nilfs_transaction_commit().
			 */
			printk(KERN_WARNING
			       "NILFS warning: journal info from a different "
			       "FS\n");
			save = current->journal_info;
		}
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}
/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make a segment construction and write tasks
 * exclusive.  The function is used with nilfs_transaction_commit() in pairs.
 * The region enclosed by these two functions can be nested.  To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task in
 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When @vacancy_check flag is set, this function will check the amount of
 * free space, and will wait for the GC to reclaim disk space if low capacity.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct nilfs_sb_info *sbi;
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(ti);

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0)
		return 0;

	vfs_check_frozen(sb, SB_FREEZE_WRITE);

	sbi = NILFS_SB(sb);
	nilfs = sbi->s_nilfs;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	return ret;
}
/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin().  This is only performed
 * in the outermost call of this function.  If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor.  If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct nilfs_sb_info *sbi;
	struct nilfs_sc_info *sci;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		return 0;
	}
	sbi = NILFS_SB(sb);
	sci = NILFS_SC(sbi);
	if (sci != NULL) {
		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&sbi->s_nilfs->ns_ndirtyblks) >
		    sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&sbi->s_nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	return err;
}
void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		return;
	}
	up_read(&NILFS_SB(sb)->s_nilfs->ns_segctor_sem);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
}
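/*
 * Illustrative sketch (not copied from any particular caller): the usual
 * calling pattern for the transaction interface above.  File operations
 * wrap each metadata update roughly like this, with "update_metadata"
 * standing in for an arbitrary, hypothetical update step:
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (err)
 *		return err;
 *	err = update_metadata(inode);
 *	if (unlikely(err))
 *		nilfs_transaction_abort(sb);
 *	else
 *		err = nilfs_transaction_commit(sb);
 *	return err;
 *
 * Nested calls only bump ti_count; the segment semaphore is taken and
 * released in the outermost begin/commit pair.
 */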
void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct the_nilfs *nilfs = sbi->s_nilfs;

	if (!sci || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}
static void nilfs_transaction_lock(struct nilfs_sb_info *sbi,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	INIT_LIST_HEAD(&ti->ti_garbage);
	current->journal_info = ti;

	for (;;) {
		down_write(&sbi->s_nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &NILFS_SC(sbi)->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(NILFS_SC(sbi));

		up_write(&sbi->s_nilfs->ns_segctor_sem);
		yield();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;
}
static void nilfs_transaction_unlock(struct nilfs_sb_info *sbi)
{
	struct nilfs_transaction_info *ti = current->journal_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&sbi->s_nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;
	if (!list_empty(&ti->ti_garbage))
		nilfs_dispose_list(sbi, &ti->ti_garbage, 0);
}
static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}
/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned sumbytes;
	unsigned flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}
static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /* The current segment is filled up
				  (internal code) */
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}
static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}
/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned binfo_size)
{
	unsigned blocksize = sci->sc_super->s_blocksize;
	/* The sizes of finfo and binfo are small enough relative to blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}
static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
}
static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;
	__u64 cno;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);

	if (test_bit(NILFS_I_GCINODE, &ii->i_state))
		cno = ii->i_cno;
	else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
		cno = 0;
	else
		cno = sci->sc_cno;

	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}
static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}
/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}
static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}
static struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};
static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}
static struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};
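/*
 * Illustrative note (drawn from the dispatch code further below): the three
 * operation tables above are selected per finfo in
 * nilfs_segctor_update_payload_blocknr(), roughly as
 *
 *	if (mode == SC_LSEG_DSYNC)
 *		sc_op = &nilfs_sc_dsync_ops;	// data-only sync log
 *	else if (ino == NILFS_DAT_INO)
 *		sc_op = &nilfs_sc_dat_ops;	// the DAT metadata file
 *	else
 *		sc_op = &nilfs_sc_file_ops;	// regular files, other metadata
 *
 * so the same collection loop can emit the binfo layout appropriate for
 * each kind of block it visits.
 */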
static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	unsigned int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages.  The
		 * range is rounded to per-page; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	pagevec_init(&pvec, 0);
 repeat:
	if (unlikely(index > last) ||
	    !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				min_t(pgoff_t, last - index,
				      PAGEVEC_SIZE - 1) + 1))
		return ndirties;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct buffer_head *bh, *head;
		struct page *page = pvec.pages[i];

		if (unlikely(page->index > last))
			break;

		lock_page(page);
		if (!page_has_buffers(page))
			create_empty_buffers(page,
					     1 << inode->i_blkbits, 0);
		unlock_page(page);

		bh = head = page_buffers(page);
		do {
			if (!buffer_dirty(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				pagevec_release(&pvec);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;
}
static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct address_space *mapping = &ii->i_btnode_cache;
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
static void nilfs_dispose_list(struct nilfs_sb_info *sbi,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned nv = 0;

	while (!list_empty(head)) {
		spin_lock(&sbi->s_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &sbi->s_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&sbi->s_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}
static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
				     struct nilfs_root *root)
{
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(root->ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
		ret++;
	return ret;
}
static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}
static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	int ret = 0;

	if (nilfs_test_metadata_dirty(sbi->s_nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&sbi->s_inode_lock);
	if (list_empty(&sbi->s_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&sbi->s_inode_lock);
	return ret;
}
static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;

	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs->ns_dat);
}
static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	/* XXX: this interface will be changed */
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
					  &raw_cp, &bh_cp);
	if (likely(!err)) {
		/* The following code is duplicated with cpfile.  But, it is
		   needed to collect the checkpoint even if it was not newly
		   created */
		nilfs_mdt_mark_buffer_dirty(bh_cp);
		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
		nilfs_cpfile_put_checkpoint(
			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	} else
		WARN_ON(err == -EINVAL || err == -ENOENT);

	return err;
}
static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
					  &raw_cp, &bh_cp);
	if (unlikely(err)) {
		WARN_ON(err == -EINVAL || err == -ENOENT);
		return err;
	}
	raw_cp->cp_snapshot_list.ssl_next = 0;
	raw_cp->cp_snapshot_list.ssl_prev = 0;
	raw_cp->cp_inodes_count =
		cpu_to_le64(atomic_read(&sci->sc_root->inodes_count));
	raw_cp->cp_blocks_count =
		cpu_to_le64(atomic_read(&sci->sc_root->blocks_count));
	raw_cp->cp_nblk_inc =
		cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
	raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
	raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

	if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		nilfs_checkpoint_clear_minor(raw_cp);
	else
		nilfs_checkpoint_set_minor(raw_cp);

	nilfs_write_inode_common(sci->sc_root->ifile,
				 &raw_cp->cp_ifile_inode, 1);
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	return 0;
}
static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)
{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
	}
}
static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}
static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	unsigned isz = nilfs->ns_inode_size;

	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;

	raw_sr->sr_bytes = cpu_to_le16(NILFS_SR_BYTES);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
				 NILFS_SR_DAT_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
				 NILFS_SR_CPFILE_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
				 NILFS_SR_SUFILE_OFFSET(isz), 1);
}
static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}
static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}
static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			brelse(bh);
			if (unlikely(err))
				goto dispose_buffers;
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_entry(listp->next, struct buffer_head,
				b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}
static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}
static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}
static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		BUG_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}
static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	size_t ndone;
	int err = 0;

	switch (sci->sc_stage.scnt) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				sci->sc_stage.scnt = NILFS_ST_DSYNC;
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DAT;
			goto dat_stage;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			sci->sc_stage.scnt = NILFS_ST_DONE;
			return 0;
		}
		sci->sc_stage.scnt++;
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		/* Fall through */
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;
		/* Creating a checkpoint */
		err = nilfs_segctor_create_checkpoint(sci);
		if (unlikely(err))
			break;
		/* Fall through */
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_SUFILE:
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);
			break;
		}
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DONE;
			return 0;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}
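/*
 * Illustrative note: sc_stage is what makes the switch above resumable.
 * When a stage returns -E2BIG because the current segment buffer filled
 * up, the stage counter and the per-stage pointers (dirty_file_ptr,
 * gc_inode_ptr) are left pointing at the inode that did not fit, e.g.:
 *
 *	err = nilfs_segctor_collect_blocks(sci, mode);	// returns -E2BIG
 *	// the caller extends the segment list and retries;
 *	// list_prepare_entry()/list_for_each_entry_continue() then pick
 *	// up the scan exactly where it stopped.
 *
 * See nilfs_segctor_collect() below for the retry loop that uses this.
 */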
/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	__u64 nextnum;
	int err, alloc = 0;

	segbuf = nilfs_segbuf_new(sci->sc_super);
	if (unlikely(!segbuf))
		return -ENOMEM;

	if (list_empty(&sci->sc_write_logs)) {
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
				 nilfs->ns_pseg_offset, nilfs);
		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_shift_to_next_segment(nilfs);
			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
		}

		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
		nextnum = nilfs->ns_nextnum;

		if (nilfs->ns_segnum == nilfs->ns_nextnum)
			/* Start from the head of a new full segment */
			alloc++;
	} else {
		/* Continue logs */
		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
		nilfs_segbuf_map_cont(segbuf, prev);
		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
		nextnum = prev->sb_nextnum;

		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
			segbuf->sb_sum.seg_seq++;
			alloc++;
		}
	}

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
	if (err)
		goto failed;

	if (alloc) {
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (err)
			goto failed;
	}
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	BUG_ON(!list_empty(&sci->sc_segbufs));
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);
	return err;
}
static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty.  The following call ensures that the buffer is dirty
	 * and will pin the buffer on memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry(segbuf, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
	return err;
}
static void nilfs_free_incomplete_logs(struct list_head *logs,
				       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/* Case 1a:  Partial segment appended into an existing
			   segment */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b:  New full segment */
			set_nilfs_discontinued(nilfs);
	}

	prev = segbuf;
	list_for_each_entry_continue(segbuf, logs, sb_list) {
		if (prev->sb_nextnum != segbuf->sb_nextnum) {
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
		prev = segbuf;
	}
}
static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeed because the segusage is dirty */
	}
}
static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeed because the segusage is dirty */

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeed */
	}
}
static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last;
	int ret;

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret);
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
}
static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int err, nadd = 1;

	/* Collection retry loop */
	for (;;) {
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);
		if (unlikely(err))
			goto failed;

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
		if (!err)
			break;

		if (unlikely(err != -E2BIG))
			goto failed;

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE)
			break;

		nilfs_clear_logs(&sci->sc_segbufs);

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
		if (unlikely(err))
			return err;

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* do not happen */
		}
		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

 failed:
	return err;
}
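/*
 * Illustrative note: the retry loop above grows the number of segments
 * added per iteration geometrically, capped by SC_MAX_SEGDELTA:
 *
 *	nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
 *
 * so successive retries request 1, 2, 4, 8, ... up to 64 extra segments,
 * which keeps the number of collection restarts small even for very
 * large flushes.
 */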
static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
				      struct buffer_head *new_bh)
{
	BUG_ON(!list_empty(&new_bh->b_assoc_buffers));

	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
	/* The caller must release old_bh */
}
static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
				     struct nilfs_segment_buffer *segbuf,
				     int mode)
{
	struct inode *inode = NULL;
	sector_t blocknr;
	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
	unsigned long nblocks = 0, ndatablk = 0;
	struct nilfs_sc_operations *sc_op = NULL;
	struct nilfs_segsum_pointer ssp;
	struct nilfs_finfo *finfo = NULL;
	union nilfs_binfo binfo;
	struct buffer_head *bh, *bh_org;
	ino_t ino = 0;
	int err = 0;

	if (!nfinfo)
		goto out;

	blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
	ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	ssp.offset = sizeof(struct nilfs_segment_summary);

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		if (bh == segbuf->sb_super_root)
			break;
		if (!finfo) {
			finfo =	nilfs_segctor_map_segsum_entry(
				sci, &ssp, sizeof(*finfo));
			ino = le64_to_cpu(finfo->fi_ino);
			nblocks = le32_to_cpu(finfo->fi_nblocks);
			ndatablk = le32_to_cpu(finfo->fi_ndatablk);

			if (buffer_nilfs_node(bh))
				inode = NILFS_BTNC_I(bh->b_page->mapping);
			else
				inode = NILFS_AS_I(bh->b_page->mapping);

			if (mode == SC_LSEG_DSYNC)
				sc_op = &nilfs_sc_dsync_ops;
			else if (ino == NILFS_DAT_INO)
				sc_op = &nilfs_sc_dat_ops;
			else /* file blocks */
				sc_op = &nilfs_sc_file_ops;
		}
		bh_org = bh;
		get_bh(bh_org);
		err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
					&binfo);
		if (bh != bh_org)
			nilfs_list_replace_buffer(bh_org, bh);
		brelse(bh_org);
		if (unlikely(err))
			goto failed_bmap;

		if (ndatablk > 0)
			sc_op->write_data_binfo(sci, &ssp, &binfo);
		else
			sc_op->write_node_binfo(sci, &ssp, &binfo);

		blocknr++;
		if (--nblocks == 0) {
			finfo = NULL;
			if (--nfinfo == 0)
				break;
		} else if (ndatablk > 0)
			ndatablk--;
	}
 out:
	return 0;

 failed_bmap:
	return err;
}
static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_segment_buffer *segbuf;
	int err;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
		if (unlikely(err))
			return err;
		nilfs_segbuf_fill_in_segsum(segbuf);
	}
	return 0;
}
static int
nilfs_copy_replace_page_buffers(struct page *page, struct list_head *out)
{
	struct page *clone_page;
	struct buffer_head *bh, *head, *bh2;
	void *kaddr;

	bh = head = page_buffers(page);

	clone_page = nilfs_alloc_private_page(bh->b_bdev, bh->b_size, 0);
	if (unlikely(!clone_page))
		return -ENOMEM;

	bh2 = page_buffers(clone_page);
	kaddr = kmap_atomic(page, KM_USER0);
	do {
		if (list_empty(&bh->b_assoc_buffers))
			continue;
		get_bh(bh2);
		page_cache_get(clone_page); /* for each bh */
		memcpy(bh2->b_data, kaddr + bh_offset(bh), bh2->b_size);
		bh2->b_blocknr = bh->b_blocknr;
		list_replace(&bh->b_assoc_buffers, &bh2->b_assoc_buffers);
		list_add_tail(&bh->b_assoc_buffers, out);
	} while (bh = bh->b_this_page, bh2 = bh2->b_this_page, bh != head);
	kunmap_atomic(kaddr, KM_USER0);

	if (!TestSetPageWriteback(clone_page))
		account_page_writeback(clone_page);
	unlock_page(clone_page);

	return 0;
}
static int nilfs_test_page_to_be_frozen(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (!mapping || !mapping->host || S_ISDIR(mapping->host->i_mode))
		return 0;

	if (page_mapped(page)) {
		ClearPageChecked(page);
		return 1;
	}
	return PageChecked(page);
}
static int nilfs_begin_page_io(struct page *page, struct list_head *out)
{
	if (!page || PageWriteback(page))
		/* For split b-tree node pages, this function may be called
		   twice.  We ignore the 2nd or later calls by this check. */
		return 0;

	lock_page(page);
	clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);

	if (nilfs_test_page_to_be_frozen(page)) {
		int err = nilfs_copy_replace_page_buffers(page, out);
		if (unlikely(err))
			return err;
	}
	return 0;
}
static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
				       struct page **failed_page)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct list_head *list = &sci->sc_copied_buffers;
	int err = 0;

	*failed_page = NULL;
	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
				}
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				err = nilfs_begin_page_io(fs_page, list);
				if (unlikely(err)) {
					*failed_page = fs_page;
					goto out;
				}
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page) {
		lock_page(bd_page);
		clear_page_dirty_for_io(bd_page);
		set_page_writeback(bd_page);
		unlock_page(bd_page);
	}
	err = nilfs_begin_page_io(fs_page, list);
	if (unlikely(err))
		*failed_page = fs_page;
 out:
	return err;
}
static int nilfs_segctor_write(struct nilfs_sc_info *sci,
			       struct the_nilfs *nilfs)
{
	int ret;

	ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
	return ret;
}
static void __nilfs_end_page_io(struct page *page, int err)
{
	if (!err) {
		if (!nilfs_page_buffers_clean(page))
			__set_page_dirty_nobuffers(page);
		ClearPageError(page);
	} else {
		__set_page_dirty_nobuffers(page);
		SetPageError(page);
	}

	if (buffer_nilfs_allocated(page_buffers(page))) {
		if (TestClearPageWriteback(page))
			dec_zone_page_state(page, NR_WRITEBACK);
	} else
		end_page_writeback(page);
}
static void nilfs_end_page_io(struct page *page, int err)
{
	if (!page)
		return;

	if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
		/*
		 * For b-tree node pages, this function may be called twice
		 * or more because they might be split in a segment.
		 */
		if (PageDirty(page)) {
			/*
			 * For pages holding split b-tree node buffers, dirty
			 * flag on the buffers may be cleared discretely.
			 * In that case, the page is once redirtied for
			 * remaining buffers, and it must be cancelled if
			 * all the buffers get cleaned later.
			 */
			lock_page(page);
			if (nilfs_page_buffers_clean(page))
				__nilfs_clear_page_dirty(page);
			unlock_page(page);
		}
		return;
	}

	__nilfs_end_page_io(page, err);
}
static void nilfs_clear_copied_buffers(struct list_head *list, int err)
{
	struct buffer_head *bh, *head;
	struct page *page;

	while (!list_empty(list)) {
		bh = list_entry(list->next, struct buffer_head,
				b_assoc_buffers);
		page = bh->b_page;
		page_cache_get(page);
		head = bh = page_buffers(page);
		do {
			if (!list_empty(&bh->b_assoc_buffers)) {
				list_del_init(&bh->b_assoc_buffers);
				if (!err) {
					set_buffer_uptodate(bh);
					clear_buffer_dirty(bh);
					clear_buffer_delay(bh);
					clear_buffer_nilfs_volatile(bh);
				}
				brelse(bh); /* for b_assoc_buffers */
			}
		} while ((bh = bh->b_this_page) != head);

		__nilfs_end_page_io(page, err);
		page_cache_release(page);
	}
}
static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
			     int err)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct buffer_head *bh;

	if (list_empty(logs))
		return;

	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, err);
				if (fs_page && fs_page == failed_page)
					return;
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, err);
}
static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs, int err)
{
	LIST_HEAD(logs);
	int ret;

	list_splice_tail_init(&sci->sc_write_logs, &logs);
	ret = nilfs_wait_on_logs(&logs);
	nilfs_abort_logs(&logs, NULL, ret ? : err);

	list_splice_tail_init(&sci->sc_segbufs, &logs);
	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
	nilfs_free_incomplete_logs(&logs, nilfs);
	nilfs_clear_copied_buffers(&sci->sc_copied_buffers, err);

	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						sci->sc_freesegs,
						sci->sc_nfreesegs,
						NULL);
		WARN_ON(ret); /* do not happen */
	}

	nilfs_destroy_logs(&logs);
}
*nilfs
,
1847 struct nilfs_segment_buffer
*segbuf
)
1849 nilfs
->ns_segnum
= segbuf
->sb_segnum
;
1850 nilfs
->ns_nextnum
= segbuf
->sb_nextnum
;
1851 nilfs
->ns_pseg_offset
= segbuf
->sb_pseg_start
- segbuf
->sb_fseg_start
1852 + segbuf
->sb_sum
.nblocks
;
1853 nilfs
->ns_seg_seq
= segbuf
->sb_sum
.seg_seq
;
1854 nilfs
->ns_ctime
= segbuf
->sb_sum
.ctime
;
static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
	int update_sr = false;

	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}
		/*
		 * We assume that the buffers which belong to the same page
		 * continue over the buffer list.
		 * Under this assumption, the last BHs of pages is
		 * identifiable by the discontinuity of bh->b_page
		 * (page != fs_page).
		 *
		 * For B-tree node blocks, however, this assumption is not
		 * guaranteed.  The cleanup code of B-tree node pages needs
		 * special care.
		 */
		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			clear_buffer_delay(bh);
			clear_buffer_nilfs_volatile(bh);
			clear_buffer_nilfs_redirected(bh);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				update_sr = true;
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, 0);
				fs_page = bh->b_page;
			}
		}

		if (!nilfs_segbuf_simplex(segbuf)) {
			if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
				set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
				sci->sc_lseg_stime = jiffies;
			}
			if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
				clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
		}
	}
	/*
	 * Since pages may continue over multiple segment buffers,
	 * end of the last page must be checked outside of the loop.
	 */
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, 0);

	nilfs_clear_copied_buffers(&sci->sc_copied_buffers, 0);

	nilfs_drop_collected_inodes(&sci->sc_dirty_files);

	if (nilfs_doing_gc())
		nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
	else
		nilfs->ns_nongc_ctime = sci->sc_seg_ctime;

	sci->sc_nblk_inc += sci->sc_nblk_this_inc;

	segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
	nilfs_set_next_segment(nilfs, segbuf);

	if (update_sr) {
		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
				       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);

		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
		nilfs_segctor_clear_metadata_dirty(sci);
	} else
		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}
static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
{
	int ret;

	ret = nilfs_wait_on_logs(&sci->sc_write_logs);
	if (!ret) {
		nilfs_segctor_complete_write(sci);
		nilfs_destroy_logs(&sci->sc_write_logs);
	}
	return ret;
}
static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
					struct nilfs_sb_info *sbi)
{
	struct nilfs_inode_info *ii, *n;
	struct inode *ifile = sci->sc_root->ifile;

	spin_lock(&sbi->s_inode_lock);
 retry:
	list_for_each_entry_safe(ii, n, &sbi->s_dirty_files, i_dirty) {
		if (!ii->i_bh) {
			struct buffer_head *ibh;
			int err;

			spin_unlock(&sbi->s_inode_lock);
			err = nilfs_ifile_get_inode_block(
				ifile, ii->vfs_inode.i_ino, &ibh);
			if (unlikely(err)) {
				nilfs_warning(sbi->s_super, __func__,
					      "failed to get inode block.\n");
				return err;
			}
			nilfs_mdt_mark_buffer_dirty(ibh);
			nilfs_mdt_mark_dirty(ifile);
			spin_lock(&sbi->s_inode_lock);
			if (likely(!ii->i_bh))
				ii->i_bh = ibh;
			else
				brelse(ibh);
			goto retry;
		}

		clear_bit(NILFS_I_QUEUED, &ii->i_state);
		set_bit(NILFS_I_BUSY, &ii->i_state);
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &sci->sc_dirty_files);
	}
	spin_unlock(&sbi->s_inode_lock);

	return 0;
}
static void nilfs_segctor_check_out_files(struct nilfs_sc_info *sci,
					  struct nilfs_sb_info *sbi)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct nilfs_inode_info *ii, *n;

	spin_lock(&sbi->s_inode_lock);
	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
		    test_bit(NILFS_I_DIRTY, &ii->i_state))
			continue;

		clear_bit(NILFS_I_BUSY, &ii->i_state);
		brelse(ii->i_bh);
		ii->i_bh = NULL;
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &ti->ti_garbage);
	}
	spin_unlock(&sbi->s_inode_lock);
}
/*
 * Main procedure of segment constructor
 */
static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct page *failed_page;
	int err;

	sci->sc_stage.scnt = NILFS_ST_INIT;
	sci->sc_cno = nilfs->ns_cno;

	err = nilfs_segctor_check_in_files(sci, sbi);
	if (unlikely(err))
		goto out;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	if (nilfs_segctor_clean(sci))
		goto out;

	do {
		sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;

		err = nilfs_segctor_begin_construction(sci, nilfs);
		if (unlikely(err))
			goto out;

		/* Update time stamp */
		sci->sc_seg_ctime = get_seconds();

		err = nilfs_segctor_collect(sci, nilfs, mode);
		if (unlikely(err))
			goto failed;

		/* Avoid empty segment */
		if (sci->sc_stage.scnt == NILFS_ST_DONE &&
		    nilfs_segbuf_empty(sci->sc_curseg)) {
			nilfs_segctor_abort_construction(sci, nilfs, 1);
			goto out;
		}

		err = nilfs_segctor_assign(sci, mode);
		if (unlikely(err))
			goto failed;

		if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
			nilfs_segctor_fill_in_file_bmap(sci);

		if (mode == SC_LSEG_SR &&
		    sci->sc_stage.scnt >= NILFS_ST_CPFILE) {
			err = nilfs_segctor_fill_in_checkpoint(sci);
			if (unlikely(err))
				goto failed_to_write;

			nilfs_segctor_fill_in_super_root(sci, nilfs);
		}
		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);

		/* Write partial segments */
		err = nilfs_segctor_prepare_write(sci, &failed_page);
		if (err) {
			nilfs_abort_logs(&sci->sc_segbufs, failed_page, err);
			goto failed_to_write;
		}

		nilfs_add_checksums_on_logs(&sci->sc_segbufs,
					    nilfs->ns_crc_seed);

		err = nilfs_segctor_write(sci, nilfs);
		if (unlikely(err))
			goto failed_to_write;

		if (sci->sc_stage.scnt == NILFS_ST_DONE ||
		    nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
			/*
			 * At this point, we avoid double buffering
			 * for blocksize < pagesize because page dirty
			 * flag is turned off during write and dirty
			 * buffers are not properly collected for
			 * pages crossing over segments.
			 */
			err = nilfs_segctor_wait(sci);
			if (err)
				goto failed_to_write;
		}
	} while (sci->sc_stage.scnt != NILFS_ST_DONE);

 out:
	nilfs_segctor_check_out_files(sci, sbi);
	return err;

 failed_to_write:
	if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
		nilfs_redirty_inodes(&sci->sc_dirty_files);

 failed:
	if (nilfs_doing_gc())
		nilfs_redirty_inodes(&sci->sc_gc_inodes);
	nilfs_segctor_abort_construction(sci, nilfs, err);
	goto out;
}
/**
 * nilfs_segctor_start_timer - set timer of background write
 * @sci: nilfs_sc_info
 *
 * If the timer has already been set, it ignores the new request.
 * This function MUST be called within a section locking the segment
 * semaphore.
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
		sci->sc_timer.expires = jiffies + sci->sc_interval;
		add_timer(&sci->sc_timer);
		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
	}
	spin_unlock(&sci->sc_state_lock);
}
static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_flush_request & (1 << bn))) {
		unsigned long prev_req = sci->sc_flush_request;

		sci->sc_flush_request |= (1 << bn);
		if (!prev_req)
			wake_up(&sci->sc_wait_daemon);
	}
	spin_unlock(&sci->sc_state_lock);
}
/**
 * nilfs_flush_segment - trigger a segment construction for resource control
 * @sb: super block
 * @ino: inode number of the file to be flushed out.
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);

	if (!sci || nilfs_doing_construction())
		return;
	nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
					/* assign bit 0 to data files */
}
{
2187 static int nilfs_segctor_sync(struct nilfs_sc_info
*sci
)
2189 struct nilfs_segctor_wait_request wait_req
;
2192 spin_lock(&sci
->sc_state_lock
);
2193 init_wait(&wait_req
.wq
);
2195 atomic_set(&wait_req
.done
, 0);
2196 wait_req
.seq
= ++sci
->sc_seq_request
;
2197 spin_unlock(&sci
->sc_state_lock
);
2199 init_waitqueue_entry(&wait_req
.wq
, current
);
2200 add_wait_queue(&sci
->sc_wait_request
, &wait_req
.wq
);
2201 set_current_state(TASK_INTERRUPTIBLE
);
2202 wake_up(&sci
->sc_wait_daemon
);
2205 if (atomic_read(&wait_req
.done
)) {
2209 if (!signal_pending(current
)) {
2216 finish_wait(&sci
->sc_wait_request
, &wait_req
.wq
);
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
{
	struct nilfs_segctor_wait_request *wrq, *n;
	unsigned long flags;

	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
				 wq.task_list) {
		if (!atomic_read(&wrq->done) &&
		    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
			wrq->err = err;
			atomic_set(&wrq->done, 1);
		}
		if (atomic_read(&wrq->done)) {
			wrq->wq.func(&wrq->wq,
				     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
				     0, NULL);
		}
	}
	spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
}
/**
 * nilfs_construct_segment - construct a logical segment
 * @sb: super block
 *
 * Return Value: On success, 0 is returned.  On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_segment(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct nilfs_transaction_info *ti;
	int err;

	if (!sci)
		return -EROFS;

	/* A call inside transactions causes a deadlock. */
	BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);

	err = nilfs_segctor_sync(sci);
	return err;
}
/**
 * nilfs_construct_dsync_segment - construct a data-only logical segment
 * @sb: super block
 * @inode: inode whose data blocks should be written out
 * @start: start byte offset
 * @end: end byte offset (inclusive)
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
				  loff_t start, loff_t end)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct nilfs_inode_info *ii;
	struct nilfs_transaction_info ti;
	int err = 0;

	if (!sci)
		return -EROFS;

	nilfs_transaction_lock(sbi, &ti, 0);

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_INODE_DIRTY, &ii->i_state) ||
	    nilfs_test_opt(sbi, STRICT_ORDER) ||
	    test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    nilfs_discontinued(sbi->s_nilfs)) {
		nilfs_transaction_unlock(sbi);
		err = nilfs_segctor_sync(sci);
		return err;
	}

	spin_lock(&sbi->s_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		spin_unlock(&sbi->s_inode_lock);
		nilfs_transaction_unlock(sbi);
		return 0;
	}
	spin_unlock(&sbi->s_inode_lock);
	sci->sc_dsync_inode = ii;
	sci->sc_dsync_start = start;
	sci->sc_dsync_end = end;

	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);

	nilfs_transaction_unlock(sbi);
	return err;
}
#define FLUSH_FILE_BIT	(0x1) /* data file only */
#define FLUSH_DAT_BIT	(1 << NILFS_DAT_INO) /* DAT only */
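
/*
 * Bit 0 of sc_flush_request stands for ordinary data files and bit
 * NILFS_DAT_INO for the DAT metadata file.  A caller that only needs the
 * blocks of a regular file pushed out would, for example, issue
 *
 *	nilfs_flush_segment(inode->i_sb, inode->i_ino);
 *
 * which nilfs_flush_segment() maps onto FLUSH_FILE_BIT before waking the
 * segctord thread.
 */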
/**
 * nilfs_segctor_accept - record accepted sequence count of log-write requests
 * @sci: segment constructor object
 */
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	sci->sc_seq_accepted = sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);
	del_timer_sync(&sci->sc_timer);
}
/**
 * nilfs_segctor_notify - notify the result of request to caller threads
 * @sci: segment constructor object
 * @mode: mode of log forming
 * @err: error code to be notified
 */
static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
	/* Clear requests (even when the construction failed) */
	spin_lock(&sci->sc_state_lock);

	if (mode == SC_LSEG_SR) {
		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
		sci->sc_seq_done = sci->sc_seq_accepted;
		nilfs_segctor_wakeup(sci, err);
		sci->sc_flush_request = 0;
	} else {
		if (mode == SC_FLUSH_FILE)
			sci->sc_flush_request &= ~FLUSH_FILE_BIT;
		else if (mode == SC_FLUSH_DAT)
			sci->sc_flush_request &= ~FLUSH_DAT_BIT;

		/* re-enable timer if checkpoint creation was not done */
		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
		    time_before(jiffies, sci->sc_timer.expires))
			add_timer(&sci->sc_timer);
	}
	spin_unlock(&sci->sc_state_lock);
}
/**
 * nilfs_segctor_construct - form logs and write them to disk
 * @sci: segment constructor object
 * @mode: mode of log forming
 */
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct nilfs_super_block **sbp;
	int err = 0;

	nilfs_segctor_accept(sci);

	if (nilfs_discontinued(nilfs))
		mode = SC_LSEG_SR;
	if (!nilfs_segctor_confirm(sci))
		err = nilfs_segctor_do_construct(sci, mode);

	if (likely(!err)) {
		if (mode != SC_FLUSH_DAT)
			atomic_set(&nilfs->ns_ndirtyblks, 0);
		if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
		    nilfs_discontinued(nilfs)) {
			down_write(&nilfs->ns_sem);
			err = -EIO;
			sbp = nilfs_prepare_super(sbi,
						  nilfs_sb_will_flip(nilfs));
			if (likely(sbp)) {
				nilfs_set_log_cursor(sbp[0], nilfs);
				err = nilfs_commit_super(sbi, NILFS_SB_COMMIT);
			}
			up_write(&nilfs->ns_sem);
		}
	}

	nilfs_segctor_notify(sci, mode, err);
	return err;
}
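
/*
 * Timer callback; @data holds the task_struct of the segctord thread set up
 * in nilfs_segctor_thread().  Waking the thread makes it notice the timeout
 * and close the current logical segment.
 */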
static void nilfs_construction_timeout(unsigned long data)
{
	struct task_struct *p = (struct task_struct *)data;

	wake_up_process(p);
}
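
/*
 * Drop GC inodes whose blocks have been written out (NILFS_I_UPDATED set)
 * from @head and release their references; inodes that were not written
 * are left on the list.
 */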
static void
nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
{
	struct nilfs_inode_info *ii, *n;

	list_for_each_entry_safe(ii, n, head, i_dirty) {
		if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
			continue;
		list_del_init(&ii->i_dirty);
		iput(&ii->vfs_inode);
	}
}
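
/*
 * nilfs_clean_segments - checkpoint construction on behalf of the cleaner
 *
 * Saves the DAT to its shadow map, applies the segment-freeing proposal
 * passed in through @argv/@kbufs, and retries super-root constructions
 * until one succeeds, optionally discarding the freed segments afterwards.
 */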
int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
			 void **kbufs)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct nilfs_transaction_info ti;
	int err;

	if (unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sbi, &ti, 1);

	err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
	if (unlikely(err))
		goto out_unlock;

	err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
	if (unlikely(err)) {
		nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
		goto out_unlock;
	}

	sci->sc_freesegs = kbufs[4];
	sci->sc_nfreesegs = argv[4].v_nmembs;
	list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);

	for (;;) {
		err = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);

		if (likely(!err))
			break;

		nilfs_warning(sb, __func__,
			      "segment construction failed. (err=%d)", err);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(sci->sc_interval);
	}
	if (nilfs_test_opt(sbi, DISCARD)) {
		int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
						 sci->sc_nfreesegs);
		if (ret) {
			printk(KERN_WARNING
			       "NILFS warning: error %d on discard request, "
			       "turning discards off for the device\n", ret);
			nilfs_clear_opt(sbi, DISCARD);
		}
	}

 out_unlock:
	sci->sc_freesegs = NULL;
	sci->sc_nfreesegs = 0;
	nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
	nilfs_transaction_unlock(sbi);
	return err;
}
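
/*
 * Run one construction round on behalf of the segctord thread, taking the
 * transaction lock around nilfs_segctor_construct().
 */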
static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct nilfs_transaction_info ti;

	nilfs_transaction_lock(sbi, &ti, 0);
	nilfs_segctor_construct(sci, mode);

	/*
	 * Unclosed segment should be retried.  We do this using sc_timer.
	 * Timeout of sc_timer will invoke complete construction which leads
	 * to closing the current logical segment.
	 */
	if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
		nilfs_segctor_start_timer(sci);

	nilfs_transaction_unlock(sbi);
}
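
/*
 * Serve a pending flush request right away; DAT flushes take precedence
 * over data-file flushes, and the corresponding request bit is cleared once
 * the construction has run.
 */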
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
{
	int mode = 0;
	int err;

	spin_lock(&sci->sc_state_lock);
	mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
		SC_FLUSH_DAT : SC_FLUSH_FILE;
	spin_unlock(&sci->sc_state_lock);

	if (mode) {
		err = nilfs_segctor_do_construct(sci, mode);

		spin_lock(&sci->sc_state_lock);
		sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
			~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
		spin_unlock(&sci->sc_state_lock);
	}
	clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
}
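
/*
 * Choose the construction mode for pending flush requests.  Flush-only
 * modes are used as long as there is no unclosed segment or the unclosed
 * segment has not outlived the checkpoint frequency; otherwise a full
 * construction with a super root (SC_LSEG_SR) is selected.
 */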
static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
{
	if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
		if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
			return SC_FLUSH_FILE;
		else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
			return SC_FLUSH_DAT;
	}
	return SC_LSEG_SR;
}
/**
 * nilfs_segctor_thread - main loop of the segment constructor thread.
 * @arg: pointer to a struct nilfs_sc_info.
 *
 * nilfs_segctor_thread() initializes a timer and serves as a daemon
 * to execute segment constructions.
 */
static int nilfs_segctor_thread(void *arg)
{
	struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
	int timeout = 0;

	sci->sc_timer.data = (unsigned long)current;
	sci->sc_timer.function = nilfs_construction_timeout;

	/* start sync. */
	sci->sc_task = current;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
	printk(KERN_INFO
	       "segctord starting. Construction interval = %lu seconds, "
	       "CP frequency < %lu seconds\n",
	       sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);

	spin_lock(&sci->sc_state_lock);
 loop:
	for (;;) {
		int mode;

		if (sci->sc_state & NILFS_SEGCTOR_QUIT)
			goto end_thread;

		if (timeout || sci->sc_seq_request != sci->sc_seq_done)
			mode = SC_LSEG_SR;
		else if (!sci->sc_flush_request)
			break;
		else
			mode = nilfs_segctor_flush_mode(sci);

		spin_unlock(&sci->sc_state_lock);
		nilfs_segctor_thread_construct(sci, mode);
		spin_lock(&sci->sc_state_lock);
		timeout = 0;
	}

	if (freezing(current)) {
		spin_unlock(&sci->sc_state_lock);
		refrigerator();
		spin_lock(&sci->sc_state_lock);
	} else {
		DEFINE_WAIT(wait);
		int should_sleep = 1;

		prepare_to_wait(&sci->sc_wait_daemon, &wait,
				TASK_INTERRUPTIBLE);

		if (sci->sc_seq_request != sci->sc_seq_done)
			should_sleep = 0;
		else if (sci->sc_flush_request)
			should_sleep = 0;
		else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
			should_sleep = time_before(jiffies,
					sci->sc_timer.expires);

		if (should_sleep) {
			spin_unlock(&sci->sc_state_lock);
			schedule();
			spin_lock(&sci->sc_state_lock);
		}
		finish_wait(&sci->sc_wait_daemon, &wait);
		timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
			   time_after_eq(jiffies, sci->sc_timer.expires));

		if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
			set_nilfs_discontinued(nilfs);
	}
	goto loop;

 end_thread:
	spin_unlock(&sci->sc_state_lock);

	/* end sync. */
	sci->sc_task = NULL;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
	return 0;
}
static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
{
	struct task_struct *t;

	t = kthread_run(nilfs_segctor_thread, sci, "segctord");
	if (IS_ERR(t)) {
		int err = PTR_ERR(t);

		printk(KERN_ERR "NILFS: error %d creating segctord thread\n",
		       err);
		return err;
	}
	wait_event(sci->sc_wait_task, sci->sc_task != NULL);
	return 0;
}
static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
	__acquires(&sci->sc_state_lock)
	__releases(&sci->sc_state_lock)
{
	sci->sc_state |= NILFS_SEGCTOR_QUIT;

	while (sci->sc_task) {
		wake_up(&sci->sc_wait_daemon);
		spin_unlock(&sci->sc_state_lock);
		wait_event(sci->sc_wait_task, sci->sc_task == NULL);
		spin_lock(&sci->sc_state_lock);
	}
}
/*
 * Setup & clean-up functions
 */
static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi,
					       struct nilfs_root *root)
{
	struct nilfs_sc_info *sci;

	sci = kzalloc(sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return NULL;

	sci->sc_sbi = sbi;
	sci->sc_super = sbi->s_super;

	nilfs_get_root(root);
	sci->sc_root = root;

	init_waitqueue_head(&sci->sc_wait_request);
	init_waitqueue_head(&sci->sc_wait_daemon);
	init_waitqueue_head(&sci->sc_wait_task);
	spin_lock_init(&sci->sc_state_lock);
	INIT_LIST_HEAD(&sci->sc_dirty_files);
	INIT_LIST_HEAD(&sci->sc_segbufs);
	INIT_LIST_HEAD(&sci->sc_write_logs);
	INIT_LIST_HEAD(&sci->sc_gc_inodes);
	INIT_LIST_HEAD(&sci->sc_copied_buffers);
	init_timer(&sci->sc_timer);

	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
	sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
	sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;

	if (sbi->s_interval)
		sci->sc_interval = sbi->s_interval;
	if (sbi->s_watermark)
		sci->sc_watermark = sbi->s_watermark;
	return sci;
}
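
/*
 * Final write-out performed while tearing the constructor down: retry a
 * full construction up to NILFS_SC_CLEANUP_RETRY times until the remaining
 * dirty state has reached the disk.
 */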
static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
{
	int ret, retrycount = NILFS_SC_CLEANUP_RETRY;

	/* The segctord thread was stopped and its timer was removed.
	   But some tasks remain. */
	do {
		struct nilfs_sb_info *sbi = sci->sc_sbi;
		struct nilfs_transaction_info ti;

		nilfs_transaction_lock(sbi, &ti, 0);
		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_transaction_unlock(sbi);

	} while (ret && retrycount-- > 0);
}
/**
 * nilfs_segctor_destroy - destroy the segment constructor.
 * @sci: nilfs_sc_info
 *
 * nilfs_segctor_destroy() kills the segctord thread and frees
 * the nilfs_sc_info struct.
 * Caller must hold the segment semaphore.
 */
static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	int flag;

	up_write(&sbi->s_nilfs->ns_segctor_sem);

	spin_lock(&sci->sc_state_lock);
	nilfs_segctor_kill_thread(sci);
	flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
		|| sci->sc_seq_request != sci->sc_seq_done);
	spin_unlock(&sci->sc_state_lock);

	if (flag || !nilfs_segctor_confirm(sci))
		nilfs_segctor_write_out(sci);

	WARN_ON(!list_empty(&sci->sc_copied_buffers));

	if (!list_empty(&sci->sc_dirty_files)) {
		nilfs_warning(sbi->s_super, __func__,
			      "dirty file(s) after the final construction\n");
		nilfs_dispose_list(sbi, &sci->sc_dirty_files, 1);
	}

	WARN_ON(!list_empty(&sci->sc_segbufs));
	WARN_ON(!list_empty(&sci->sc_write_logs));

	nilfs_put_root(sci->sc_root);

	down_write(&sbi->s_nilfs->ns_segctor_sem);

	del_timer_sync(&sci->sc_timer);
	kfree(sci);
}
/**
 * nilfs_attach_segment_constructor - attach a segment constructor
 * @sbi: nilfs_sb_info
 * @root: root object of the current filesystem tree
 *
 * nilfs_attach_segment_constructor() allocates a struct nilfs_sc_info,
 * initializes it, and starts the segment constructor.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi,
				     struct nilfs_root *root)
{
	int err;

	if (NILFS_SC(sbi)) {
		/*
		 * This happens if the filesystem was remounted
		 * read/write after nilfs_error degenerated it into a
		 * read-only mount.
		 */
		nilfs_detach_segment_constructor(sbi);
	}

	sbi->s_sc_info = nilfs_segctor_new(sbi, root);
	if (!sbi->s_sc_info)
		return -ENOMEM;

	err = nilfs_segctor_start_thread(NILFS_SC(sbi));
	if (err) {
		kfree(sbi->s_sc_info);
		sbi->s_sc_info = NULL;
	}
	return err;
}
/**
 * nilfs_detach_segment_constructor - destroy the segment constructor
 * @sbi: nilfs_sb_info
 *
 * nilfs_detach_segment_constructor() kills the segment constructor daemon,
 * frees the struct nilfs_sc_info, and destroys the dirty file list.
 */
void nilfs_detach_segment_constructor(struct nilfs_sb_info *sbi)
{
	struct the_nilfs *nilfs = sbi->s_nilfs;
	LIST_HEAD(garbage_list);

	down_write(&nilfs->ns_segctor_sem);
	if (NILFS_SC(sbi)) {
		nilfs_segctor_destroy(NILFS_SC(sbi));
		sbi->s_sc_info = NULL;
	}

	/* Force to free the list of dirty files */
	spin_lock(&sbi->s_inode_lock);
	if (!list_empty(&sbi->s_dirty_files)) {
		list_splice_init(&sbi->s_dirty_files, &garbage_list);
		nilfs_warning(sbi->s_super, __func__,
			      "Non empty dirty list after the last "
			      "segment construction\n");
	}
	spin_unlock(&sbi->s_inode_lock);
	up_write(&nilfs->ns_segctor_sem);

	nilfs_dispose_list(sbi, &garbage_list, 1);
}