/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>
static int gc_thread_func(void *data)
{
        struct f2fs_sb_info *sbi = data;
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
        long wait_ms;

        wait_ms = gc_th->min_sleep_time;

        do {
                wait_event_interruptible_timeout(*wq,
                                kthread_should_stop(),
                                msecs_to_jiffies(wait_ms));
                if (kthread_should_stop())
                        break;

                if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
                        increase_sleep_time(gc_th, &wait_ms);
                        continue;
                }

#ifdef CONFIG_F2FS_FAULT_INJECTION
                if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
                        f2fs_show_injection_info(FAULT_CHECKPOINT);
                        f2fs_stop_checkpoint(sbi, false);
                }
#endif

                /*
                 * [GC triggering condition]
                 * 0. GC is not conducted currently.
                 * 1. There are enough dirty segments.
                 * 2. IO subsystem is idle by checking the # of writeback pages.
                 * 3. IO subsystem is idle by checking the # of requests in
                 *    bdev's request list.
                 *
                 * Note) We have to avoid triggering GCs frequently,
                 * because it is possible that some segments can be
                 * invalidated soon after by user update or deletion.
                 * So we'd like to wait some time to collect dirty segments.
                 */
                if (!mutex_trylock(&sbi->gc_mutex))
                        continue;

                if (!is_idle(sbi)) {
                        increase_sleep_time(gc_th, &wait_ms);
                        mutex_unlock(&sbi->gc_mutex);
                        continue;
                }

                if (has_enough_invalid_blocks(sbi))
                        decrease_sleep_time(gc_th, &wait_ms);
                else
                        increase_sleep_time(gc_th, &wait_ms);

                stat_inc_bggc_count(sbi);

                /* if return value is not zero, no victim was selected */
                if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true))
                        wait_ms = gc_th->no_gc_sleep_time;

                trace_f2fs_background_gc(sbi->sb, wait_ms,
                                prefree_segments(sbi), free_segments(sbi));

                /* balancing f2fs's metadata periodically */
                f2fs_balance_fs_bg(sbi);

        } while (!kthread_should_stop());
        return 0;
}
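
/*
 * Illustrative note (not from the original source): with the stock
 * tunables in gc.h -- min_sleep_time 30s, max_sleep_time 60s and
 * no_gc_sleep_time 300s are the usual defaults -- the loop above adapts
 * its pace: a busy or mostly-clean filesystem pushes wait_ms up toward
 * 60s via increase_sleep_time(), plenty of invalid blocks pulls it back
 * toward 30s via decrease_sleep_time(), and a pass that finds no victim
 * parks the thread for the full 300s.
 */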

int start_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th;
        dev_t dev = sbi->sb->s_bdev->bd_dev;
        int err = 0;

        gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
        if (!gc_th) {
                err = -ENOMEM;
                goto out;
        }

        gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
        gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
        gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

        gc_th->gc_idle = 0;

        sbi->gc_thread = gc_th;
        init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
        sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
                        "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(gc_th->f2fs_gc_task)) {
                err = PTR_ERR(gc_th->f2fs_gc_task);
                kfree(gc_th);
                sbi->gc_thread = NULL;
        }
out:
        return err;
}
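
/*
 * Illustrative note (not from the original source): the kthread name
 * encodes the block device numbers, so the GC thread for a filesystem
 * on e.g. /dev/sda1 (major 8, minor 1) shows up in ps as "f2fs_gc-8:1".
 */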

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

        if (!gc_th)
                return;
        kthread_stop(gc_th->f2fs_gc_task);
        kfree(gc_th);
        sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
        int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

        if (gc_th && gc_th->gc_idle) {
                if (gc_th->gc_idle == 1)
                        gc_mode = GC_CB;
                else if (gc_th->gc_idle == 2)
                        gc_mode = GC_GREEDY;
        }
        return gc_mode;
}
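
/*
 * Illustrative note (not from the original source): gc_idle is a
 * runtime knob -- upstream exposes it through sysfs as
 * /sys/fs/f2fs/<disk>/gc_idle -- so an administrator can force the
 * idle-time policy to cost-benefit (1) or greedy (2) instead of the
 * default split of cost-benefit for background GC and greedy for
 * foreground GC.
 */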

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
                        int type, struct victim_sel_policy *p)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        if (p->alloc_mode == SSR) {
                p->gc_mode = GC_GREEDY;
                p->dirty_segmap = dirty_i->dirty_segmap[type];
                p->max_search = dirty_i->nr_dirty[type];
                p->ofs_unit = 1;
        } else {
                p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
                p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
                p->max_search = dirty_i->nr_dirty[DIRTY];
                p->ofs_unit = sbi->segs_per_sec;
        }

        /* we need to check every dirty segment in the FG_GC case */
        if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
                p->max_search = sbi->max_victim_search;

        p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
                                struct victim_sel_policy *p)
{
        /* SSR allocates in a segment unit */
        if (p->alloc_mode == SSR)
                return sbi->blocks_per_seg;
        if (p->gc_mode == GC_GREEDY)
                return sbi->blocks_per_seg * p->ofs_unit;
        else if (p->gc_mode == GC_CB)
                return UINT_MAX;
        else /* No other gc_mode */
                return 0;
}
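
/*
 * Illustrative note (not from the original source): these maxima seed
 * p.min_cost in get_victim_by_default() below, so any real candidate
 * wins the first comparison. E.g. with 512-block segments and one
 * segment per section, a greedy search starts from a worst case of 512
 * valid blocks, and any dirty segment with fewer valid blocks
 * immediately becomes the provisional victim.
 */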

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned int secno;

        /*
         * If the gc_type is FG_GC, we can select the victim segments
         * selected by background GC before.
         * Those segments are guaranteed to have few valid blocks.
         */
        for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
                if (sec_usage_check(sbi, secno))
                        continue;

                if (no_fggc_candidate(sbi, secno))
                        continue;

                clear_bit(secno, dirty_i->victim_secmap);
                return secno * sbi->segs_per_sec;
        }
        return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int secno = GET_SECNO(sbi, segno);
        unsigned int start = secno * sbi->segs_per_sec;
        unsigned long long mtime = 0;
        unsigned int vblocks;
        unsigned char age = 0;
        unsigned char u;
        unsigned int i;

        for (i = 0; i < sbi->segs_per_sec; i++)
                mtime += get_seg_entry(sbi, start + i)->mtime;
        vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

        mtime = div_u64(mtime, sbi->segs_per_sec);
        vblocks = div_u64(vblocks, sbi->segs_per_sec);

        u = (vblocks * 100) >> sbi->log_blocks_per_seg;

        /* Handle if the system time has been changed by the user */
        if (mtime < sit_i->min_mtime)
                sit_i->min_mtime = mtime;
        if (mtime > sit_i->max_mtime)
                sit_i->max_mtime = mtime;
        if (sit_i->max_mtime != sit_i->min_mtime)
                age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
                                sit_i->max_mtime - sit_i->min_mtime);

        return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
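
/*
 * Worked example (illustrative, assuming 512-block segments, i.e.
 * log_blocks_per_seg == 9, and one segment per section): a victim with
 * vblocks == 256 gives u = (256 * 100) >> 9 = 50; with age == 50 the
 * cost is UINT_MAX - (100 * 50 * 50) / 150 = UINT_MAX - 1666. An
 * emptier, older victim with u == 10 and age == 90 costs
 * UINT_MAX - (100 * 90 * 90) / 110 = UINT_MAX - 7363, i.e. less, so the
 * minimum-cost search prefers it: low utilization means little copy
 * work, and old age means the data is unlikely to be invalidated soon.
 */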

static unsigned int get_greedy_cost(struct f2fs_sb_info *sbi,
                                                unsigned int segno)
{
        unsigned int valid_blocks =
                        get_valid_blocks(sbi, segno, sbi->segs_per_sec);

        return IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
                                valid_blocks * 2 : valid_blocks;
}
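
/*
 * Illustrative note (not from the original source): doubling the cost
 * of data segments biases greedy selection toward node segments when
 * valid-block counts are comparable -- a node segment with 200 valid
 * blocks (cost 200) beats a data segment with 150 (cost 300).
 */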

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
                        unsigned int segno, struct victim_sel_policy *p)
{
        if (p->alloc_mode == SSR)
                return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

        /* alloc_mode == LFS */
        if (p->gc_mode == GC_GREEDY)
                return get_greedy_cost(sbi, segno);
        else
                return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
                                unsigned int offset, unsigned int len)
{
        unsigned int end = offset + len, sum = 0;

        while (offset < end) {
                if (test_bit(offset++, addr))
                        ++sum;
        }
        return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has the minimum valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
                unsigned int *result, int gc_type, int type, char alloc_mode)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct victim_sel_policy p;
        unsigned int secno, last_victim;
        unsigned int last_segment = MAIN_SEGS(sbi);
        unsigned int nsearched = 0;

        mutex_lock(&dirty_i->seglist_lock);

        p.alloc_mode = alloc_mode;
        select_policy(sbi, gc_type, type, &p);

        p.min_segno = NULL_SEGNO;
        p.min_cost = get_max_cost(sbi, &p);

        if (p.max_search == 0)
                goto out;

        last_victim = sbi->last_victim[p.gc_mode];
        if (p.alloc_mode == LFS && gc_type == FG_GC) {
                p.min_segno = check_bg_victims(sbi);
                if (p.min_segno != NULL_SEGNO)
                        goto got_it;
        }

        while (1) {
                unsigned long cost;
                unsigned int segno;

                segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
                if (segno >= last_segment) {
                        if (sbi->last_victim[p.gc_mode]) {
                                last_segment = sbi->last_victim[p.gc_mode];
                                sbi->last_victim[p.gc_mode] = 0;
                                p.offset = 0;
                                continue;
                        }
                        break;
                }

                p.offset = segno + p.ofs_unit;
                if (p.ofs_unit > 1) {
                        p.offset -= segno % p.ofs_unit;
                        nsearched += count_bits(p.dirty_segmap,
                                                p.offset - p.ofs_unit,
                                                p.ofs_unit);
                } else {
                        nsearched++;
                }

                secno = GET_SECNO(sbi, segno);

                if (sec_usage_check(sbi, secno))
                        goto next;
                if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
                        goto next;
                if (gc_type == FG_GC && p.alloc_mode == LFS &&
                                        no_fggc_candidate(sbi, secno))
                        goto next;

                cost = get_gc_cost(sbi, segno, &p);

                if (p.min_cost > cost) {
                        p.min_segno = segno;
                        p.min_cost = cost;
                }
next:
                if (nsearched >= p.max_search) {
                        if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
                                sbi->last_victim[p.gc_mode] = last_victim + 1;
                        else
                                sbi->last_victim[p.gc_mode] = segno + 1;
                        break;
                }
        }
        if (p.min_segno != NULL_SEGNO) {
got_it:
                if (p.alloc_mode == LFS) {
                        secno = GET_SECNO(sbi, p.min_segno);
                        if (gc_type == FG_GC)
                                sbi->cur_victim_sec = secno;
                        else
                                set_bit(secno, dirty_i->victim_secmap);
                }
                *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

                trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
                                sbi->cur_victim_sec,
                                prefree_segments(sbi), free_segments(sbi));
        }
out:
        mutex_unlock(&dirty_i->seglist_lock);

        return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}
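
/*
 * Illustrative note (not from the original source): the scan is
 * circular. If last_victim[gc_mode] is, say, 1000, the search starts at
 * segment 1000, runs to the end of the main area, then restarts from 0
 * and stops again at 1000, so repeated calls spread victim selection
 * over the whole main area instead of hammering its beginning.
 */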

static const struct victim_selection default_v_ops = {
        .get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
        struct inode_entry *ie;

        ie = radix_tree_lookup(&gc_list->iroot, ino);
        if (ie)
                return ie->inode;
        return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
        struct inode_entry *new_ie;

        if (inode == find_gc_inode(gc_list, inode->i_ino)) {
                iput(inode);
                return;
        }
        new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        new_ie->inode = inode;

        f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
        list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
        struct inode_entry *ie, *next_ie;

        list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
                radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
                iput(ie->inode);
                list_del(&ie->list);
                kmem_cache_free(inode_entry_slab, ie);
        }
}

static int check_valid_map(struct f2fs_sb_info *sbi,
                                unsigned int segno, int offset)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct seg_entry *sentry;
        int ret;

        mutex_lock(&sit_i->sentry_lock);
        sentry = get_seg_entry(sbi, segno);
        ret = f2fs_test_bit(offset, sentry->cur_valid_map);
        mutex_unlock(&sit_i->sentry_lock);
        return ret;
}

/*
 * This function compares the node address recorded in the summary with that
 * in the NAT. On validity, copy that node with cold status; otherwise (an
 * invalid node), ignore it.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
                struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;
        int phase = 0;

        start_addr = START_BLOCK(sbi, segno);

next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                nid_t nid = le32_to_cpu(entry->nid);
                struct page *node_page;
                struct node_info ni;

                /* stop BG_GC if there are not enough free sections. */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
                        return;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (phase == 0) {
                        ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
                                                        META_NAT, true);
                        continue;
                }

                if (phase == 1) {
                        ra_node_page(sbi, nid);
                        continue;
                }

                /* phase == 2 */
                node_page = get_node_page(sbi, nid);
                if (IS_ERR(node_page))
                        continue;

                /* block may become invalid during get_node_page */
                if (check_valid_map(sbi, segno, off) == 0) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                get_node_info(sbi, nid, &ni);
                if (ni.blk_addr != start_addr + off) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                move_node_page(node_page, gc_type);
                stat_inc_node_blk_count(sbi, 1, gc_type);
        }

        if (++phase < 3)
                goto next_step;
}
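
/*
 * Illustrative note (not from the original source): the three passes
 * above pipeline the I/O. Phase 0 reads ahead the NAT blocks covering
 * the victims' nids, phase 1 reads ahead the node pages themselves, and
 * only phase 2 locks each still-valid node page and migrates it, so the
 * actual moves mostly hit pages already in the cache.
 */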

/*
 * Calculate the start block index indicated by the given node offset.
 * Be careful: the caller should give a node offset only indicating direct node
 * blocks. If any node offsets, which point to other types of node blocks such
 * as indirect or double indirect node blocks, are given, it must be a caller's
 * mistake or BUG.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
        unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
        unsigned int bidx;

        if (node_ofs == 0)
                return 0;

        if (node_ofs <= 2) {
                bidx = node_ofs - 1;
        } else if (node_ofs <= indirect_blks) {
                int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 2 - dec;
        } else {
                int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 5 - dec;
        }
        return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}
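
/*
 * Worked example (illustrative): node offsets count node blocks in file
 * order -- 0 is the inode, 1 and 2 the two direct nodes, 3 the first
 * indirect node, 4 its first direct-node child, and so on. So
 * node_ofs == 1 yields bidx = 0 and returns ADDRS_PER_INODE(inode), the
 * first block addressed outside the inode itself, while node_ofs == 4
 * yields dec = 0 and bidx = 2, i.e.
 * 2 * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode).
 */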

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
        struct page *node_page;
        nid_t nid;
        unsigned int ofs_in_node;
        block_t source_blkaddr;

        nid = le32_to_cpu(sum->nid);
        ofs_in_node = le16_to_cpu(sum->ofs_in_node);

        node_page = get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return false;

        get_node_info(sbi, nid, dni);

        if (sum->version != dni->version) {
                f2fs_put_page(node_page, 1);
                return false;
        }

        *nofs = ofs_of_node(node_page);
        source_blkaddr = datablock_addr(node_page, ofs_in_node);
        f2fs_put_page(node_page, 1);

        if (source_blkaddr != blkaddr)
                return false;
        return true;
}

static void move_encrypted_block(struct inode *inode, block_t bidx,
                                        unsigned int segno, int off)
{
        struct f2fs_io_info fio = {
                .sbi = F2FS_I_SB(inode),
                .type = DATA,
                .op = REQ_OP_READ,
                .op_flags = 0,
                .encrypted_page = NULL,
        };
        struct dnode_of_data dn;
        struct f2fs_summary sum;
        struct node_info ni;
        struct page *page;
        block_t newaddr;
        int err;

        /* do not read out */
        page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
        if (!page)
                return;

        if (!check_valid_map(F2FS_I_SB(inode), segno, off))
                goto out;

        if (f2fs_is_atomic_file(inode))
                goto out;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
        if (err)
                goto out;

        if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
                ClearPageUptodate(page);
                goto put_out;
        }

        /*
         * don't cache encrypted data into the meta inode until the previous
         * dirty data has been written back, to avoid racing between GC and
         * flush.
         */
        f2fs_wait_on_page_writeback(page, DATA, true);

        get_node_info(fio.sbi, dn.nid, &ni);
        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

        /* read page */
        fio.page = page;
        fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

        allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
                                        &sum, CURSEG_COLD_DATA);

        fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
                                        FGP_LOCK | FGP_CREAT, GFP_NOFS);
        if (!fio.encrypted_page) {
                err = -ENOMEM;
                goto recover_block;
        }

        err = f2fs_submit_page_bio(&fio);
        if (err)
                goto put_page_out;

        /* write page */
        lock_page(fio.encrypted_page);

        if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
                err = -EIO;
                goto put_page_out;
        }
        if (unlikely(!PageUptodate(fio.encrypted_page))) {
                err = -EIO;
                goto put_page_out;
        }

        set_page_dirty(fio.encrypted_page);
        f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
        if (clear_page_dirty_for_io(fio.encrypted_page))
                dec_page_count(fio.sbi, F2FS_DIRTY_META);

        set_page_writeback(fio.encrypted_page);

        /* allocate block address */
        f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

        fio.op = REQ_OP_WRITE;
        fio.op_flags = REQ_SYNC;
        fio.new_blkaddr = newaddr;
        f2fs_submit_page_mbio(&fio);

        f2fs_update_data_blkaddr(&dn, newaddr);
        set_inode_flag(inode, FI_APPEND_WRITE);
        if (page->index == 0)
                set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
        f2fs_put_page(fio.encrypted_page, 1);
recover_block:
        if (err)
                __f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
                                                                true, true);
put_out:
        f2fs_put_dnode(&dn);
out:
        f2fs_put_page(page, 1);
}
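
/*
 * Illustrative note (not from the original source): the function above
 * never decrypts anything. GC may run without the file's key loaded, so
 * the ciphertext block is read through the meta inode's address space
 * (META_MAPPING), written verbatim to the pre-allocated new address,
 * and only the dnode's block pointer is updated; on failure
 * __f2fs_replace_block() rolls the allocation back.
 */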

static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
                                        unsigned int segno, int off)
{
        struct page *page;

        page = get_lock_data_page(inode, bidx, true);
        if (IS_ERR(page))
                return;

        if (!check_valid_map(F2FS_I_SB(inode), segno, off))
                goto out;

        if (f2fs_is_atomic_file(inode))
                goto out;

        if (gc_type == BG_GC) {
                if (PageWriteback(page))
                        goto out;
                set_page_dirty(page);
                set_cold_data(page);
        } else {
                struct f2fs_io_info fio = {
                        .sbi = F2FS_I_SB(inode),
                        .type = DATA,
                        .op = REQ_OP_WRITE,
                        .op_flags = REQ_SYNC,
                        .page = page,
                        .encrypted_page = NULL,
                };
                bool is_dirty = PageDirty(page);
                int err;

retry:
                set_page_dirty(page);
                f2fs_wait_on_page_writeback(page, DATA, true);
                if (clear_page_dirty_for_io(page)) {
                        inode_dec_dirty_pages(inode);
                        remove_dirty_inode(inode);
                }

                set_cold_data(page);

                err = do_write_data_page(&fio);
                if (err == -ENOMEM && is_dirty) {
                        congestion_wait(BLK_RW_ASYNC, HZ/50);
                        goto retry;
                }
        }
out:
        f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block, and
 * identifies the data block's validity. If the block is valid, copy it with
 * cold status and modify the parent node.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
        struct super_block *sb = sbi->sb;
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;
        int phase = 0;

        start_addr = START_BLOCK(sbi, segno);

next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                struct page *data_page;
                struct inode *inode;
                struct node_info dni; /* dnode info for the data */
                unsigned int ofs_in_node, nofs;
                block_t start_bidx;
                nid_t nid = le32_to_cpu(entry->nid);

                /* stop BG_GC if there are not enough free sections. */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
                        return;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (phase == 0) {
                        ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
                                                        META_NAT, true);
                        continue;
                }

                if (phase == 1) {
                        ra_node_page(sbi, nid);
                        continue;
                }

                /* Get an inode by ino with checking validity */
                if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
                        continue;

                if (phase == 2) {
                        ra_node_page(sbi, dni.ino);
                        continue;
                }

                ofs_in_node = le16_to_cpu(entry->ofs_in_node);

                if (phase == 3) {
                        inode = f2fs_iget(sb, dni.ino);
                        if (IS_ERR(inode) || is_bad_inode(inode))
                                continue;

                        /* if encrypted inode, defer the move to the last phase */
                        if (f2fs_encrypted_inode(inode) &&
                                        S_ISREG(inode->i_mode)) {
                                add_gc_inode(gc_list, inode);
                                continue;
                        }

                        start_bidx = start_bidx_of_node(nofs, inode);
                        data_page = get_read_data_page(inode,
                                        start_bidx + ofs_in_node, REQ_RAHEAD,
                                        true);
                        if (IS_ERR(data_page)) {
                                iput(inode);
                                continue;
                        }

                        f2fs_put_page(data_page, 0);
                        add_gc_inode(gc_list, inode);
                        continue;
                }

                /* phase 4 */
                inode = find_gc_inode(gc_list, dni.ino);
                if (inode) {
                        struct f2fs_inode_info *fi = F2FS_I(inode);
                        bool locked = false;

                        if (S_ISREG(inode->i_mode)) {
                                if (!down_write_trylock(&fi->dio_rwsem[READ]))
                                        continue;
                                if (!down_write_trylock(
                                                &fi->dio_rwsem[WRITE])) {
                                        up_write(&fi->dio_rwsem[READ]);
                                        continue;
                                }
                                locked = true;
                        }

                        start_bidx = start_bidx_of_node(nofs, inode)
                                                                + ofs_in_node;
                        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                                move_encrypted_block(inode, start_bidx, segno, off);
                        else
                                move_data_page(inode, start_bidx, gc_type, segno, off);

                        if (locked) {
                                up_write(&fi->dio_rwsem[WRITE]);
                                up_write(&fi->dio_rwsem[READ]);
                        }

                        stat_inc_data_blk_count(sbi, 1, gc_type);
                }
        }

        if (++phase < 5)
                goto next_step;
}
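
/*
 * Illustrative note (not from the original source): data segments need
 * five passes where node segments needed three. Phases 0 and 1 read
 * ahead the NAT blocks and dnode pages, phase 2 reads ahead the owning
 * inodes of blocks that is_alive() confirms, phase 3 grabs those inodes
 * and readaheads their data pages, and phase 4 finally moves each block
 * under the inode's dio_rwsem locks taken above.
 */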

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
                        int gc_type)
{
        struct sit_info *sit_i = SIT_I(sbi);
        int ret;

        mutex_lock(&sit_i->sentry_lock);
        ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
                                                NO_CHECK_TYPE, LFS);
        mutex_unlock(&sit_i->sentry_lock);
        return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
                                unsigned int start_segno,
                                struct gc_inode_list *gc_list, int gc_type)
{
        struct page *sum_page;
        struct f2fs_summary_block *sum;
        struct blk_plug plug;
        unsigned int segno = start_segno;
        unsigned int end_segno = start_segno + sbi->segs_per_sec;
        int sec_freed = 0;
        unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
                                                SUM_TYPE_DATA : SUM_TYPE_NODE;

        /* readahead multiple SSA blocks, which have contiguous addresses */
        if (sbi->segs_per_sec > 1)
                ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
                                        sbi->segs_per_sec, META_SSA, true);

        /* reference all summary pages */
        while (segno < end_segno) {
                sum_page = get_sum_page(sbi, segno++);
                unlock_page(sum_page);
        }

        blk_start_plug(&plug);

        for (segno = start_segno; segno < end_segno; segno++) {

                /* find segment summary of victim */
                sum_page = find_get_page(META_MAPPING(sbi),
                                        GET_SUM_BLOCK(sbi, segno));
                f2fs_put_page(sum_page, 0);

                if (get_valid_blocks(sbi, segno, 1) == 0 ||
                                !PageUptodate(sum_page) ||
                                unlikely(f2fs_cp_error(sbi)))
                        goto next;

                sum = page_address(sum_page);
                f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

                /*
                 * this is to avoid deadlock:
                 * - lock_page(sum_page)         - f2fs_replace_block
                 *  - check_valid_map()            - mutex_lock(sentry_lock)
                 *   - mutex_lock(sentry_lock)     - change_curseg()
                 *                                  - lock_page(sum_page)
                 */
                if (type == SUM_TYPE_NODE)
                        gc_node_segment(sbi, sum->entries, segno, gc_type);
                else
                        gc_data_segment(sbi, sum->entries, gc_list, segno,
                                                                gc_type);

                stat_inc_seg_count(sbi, type, gc_type);
next:
                f2fs_put_page(sum_page, 0);
        }

        if (gc_type == FG_GC)
                f2fs_submit_merged_bio(sbi,
                                (type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);

        blk_finish_plug(&plug);

        if (gc_type == FG_GC &&
                get_valid_blocks(sbi, start_segno, sbi->segs_per_sec) == 0)
                sec_freed = 1;

        stat_inc_call_count(sbi->stat_info);

        return sec_freed;
}
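
/*
 * Illustrative note (not from the original source): GC works on whole
 * sections. With e.g. segs_per_sec == 4, one victim costs four summary
 * pages (read ahead together since their SSA blocks are contiguous) and
 * four per-segment passes, and FG_GC only reports the section freed
 * when all four segments end up with zero valid blocks.
 */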

int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background)
{
        unsigned int segno;
        int gc_type = sync ? FG_GC : BG_GC;
        int sec_freed = 0;
        int ret = -EINVAL;
        struct cp_control cpc;
        struct gc_inode_list gc_list = {
                .ilist = LIST_HEAD_INIT(gc_list.ilist),
                .iroot = RADIX_TREE_INIT(GFP_NOFS),
        };

        cpc.reason = __get_cp_reason(sbi);
gc_more:
        if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
                goto stop;
        if (unlikely(f2fs_cp_error(sbi))) {
                ret = -EIO;
                goto stop;
        }

        if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
                /*
                 * For example, if there are many prefree_segments below a
                 * given threshold, we can make them free by checkpoint.
                 * Then, we secure free segments which don't need FG_GC
                 * any more.
                 */
                ret = write_checkpoint(sbi, &cpc);
                if (ret)
                        goto stop;
                if (has_not_enough_free_secs(sbi, 0, 0))
                        gc_type = FG_GC;
        }

        /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
        if (gc_type == BG_GC && !background)
                goto stop;
        if (!__get_victim(sbi, &segno, gc_type))
                goto stop;
        ret = 0;

        if (do_garbage_collect(sbi, segno, &gc_list, gc_type) &&
                        gc_type == FG_GC)
                sec_freed++;

        if (gc_type == FG_GC)
                sbi->cur_victim_sec = NULL_SEGNO;

        if (!sync) {
                if (has_not_enough_free_secs(sbi, sec_freed, 0))
                        goto gc_more;

                if (gc_type == FG_GC)
                        ret = write_checkpoint(sbi, &cpc);
        }
stop:
        mutex_unlock(&sbi->gc_mutex);

        put_gc_inode(&gc_list);

        if (sync)
                ret = sec_freed ? 0 : -EAGAIN;
        return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
        u64 main_count, resv_count, ovp_count, blocks_per_sec;

        DIRTY_I(sbi)->v_ops = &default_v_ops;

        /* threshold of # of valid blocks in a section for victims of FG_GC */
        main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
        resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
        ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
        blocks_per_sec = sbi->blocks_per_seg * sbi->segs_per_sec;

        sbi->fggc_threshold = div64_u64((main_count - ovp_count) * blocks_per_sec,
                                        (main_count - resv_count));
}
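
/*
 * Worked example (illustrative numbers): take 2048 main segments of 512
 * blocks (main_count = 1048576), 200 overprovision segments
 * (ovp_count = 102400), 100 reserved segments (resv_count = 51200) and
 * one segment per section (blocks_per_sec = 512). Then fggc_threshold =
 * (1048576 - 102400) * 512 / (1048576 - 51200) = 485, so
 * no_fggc_candidate() skips any section with 485 or more valid blocks:
 * reclaiming it would copy almost a full section just to free one.
 */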