1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
8 #include <linux/module.h>
9 #include <linux/init.h>
11 #include <linux/fs_context.h>
12 #include <linux/sched/mm.h>
13 #include <linux/statfs.h>
14 #include <linux/kthread.h>
15 #include <linux/parser.h>
16 #include <linux/mount.h>
17 #include <linux/seq_file.h>
18 #include <linux/proc_fs.h>
19 #include <linux/random.h>
20 #include <linux/exportfs.h>
21 #include <linux/blkdev.h>
22 #include <linux/quotaops.h>
23 #include <linux/f2fs_fs.h>
24 #include <linux/sysfs.h>
25 #include <linux/quota.h>
26 #include <linux/unicode.h>
27 #include <linux/part_stat.h>
28 #include <linux/zstd.h>
29 #include <linux/lz4.h>
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/f2fs.h>
41 static struct kmem_cache
*f2fs_inode_cachep
;
43 #ifdef CONFIG_F2FS_FAULT_INJECTION
45 const char *f2fs_fault_name
[FAULT_MAX
] = {
46 [FAULT_KMALLOC
] = "kmalloc",
47 [FAULT_KVMALLOC
] = "kvmalloc",
48 [FAULT_PAGE_ALLOC
] = "page alloc",
49 [FAULT_PAGE_GET
] = "page get",
50 [FAULT_ALLOC_NID
] = "alloc nid",
51 [FAULT_ORPHAN
] = "orphan",
52 [FAULT_BLOCK
] = "no more block",
53 [FAULT_DIR_DEPTH
] = "too big dir depth",
54 [FAULT_EVICT_INODE
] = "evict_inode fail",
55 [FAULT_TRUNCATE
] = "truncate fail",
56 [FAULT_READ_IO
] = "read IO error",
57 [FAULT_CHECKPOINT
] = "checkpoint error",
58 [FAULT_DISCARD
] = "discard error",
59 [FAULT_WRITE_IO
] = "write IO error",
60 [FAULT_SLAB_ALLOC
] = "slab alloc",
61 [FAULT_DQUOT_INIT
] = "dquot initialize",
62 [FAULT_LOCK_OP
] = "lock_op",
63 [FAULT_BLKADDR_VALIDITY
] = "invalid blkaddr",
64 [FAULT_BLKADDR_CONSISTENCE
] = "inconsistent blkaddr",
65 [FAULT_NO_SEGMENT
] = "no free segment",
68 int f2fs_build_fault_attr(struct f2fs_sb_info
*sbi
, unsigned long rate
,
71 struct f2fs_fault_info
*ffi
= &F2FS_OPTION(sbi
).fault_info
;
76 atomic_set(&ffi
->inject_ops
, 0);
77 ffi
->inject_rate
= (int)rate
;
81 if (type
>= BIT(FAULT_MAX
))
83 ffi
->inject_type
= (unsigned int)type
;
87 memset(ffi
, 0, sizeof(struct f2fs_fault_info
));
90 "build fault injection attr: rate: %lu, type: 0x%lx",
96 /* f2fs-wide shrinker description */
97 static struct shrinker
*f2fs_shrinker_info
;
99 static int __init
f2fs_init_shrinker(void)
101 f2fs_shrinker_info
= shrinker_alloc(0, "f2fs-shrinker");
102 if (!f2fs_shrinker_info
)
105 f2fs_shrinker_info
->count_objects
= f2fs_shrink_count
;
106 f2fs_shrinker_info
->scan_objects
= f2fs_shrink_scan
;
108 shrinker_register(f2fs_shrinker_info
);
113 static void f2fs_exit_shrinker(void)
115 shrinker_free(f2fs_shrinker_info
);
120 Opt_disable_roll_forward
,
131 Opt_disable_ext_identify
,
134 Opt_inline_xattr_size
,
169 Opt_test_dummy_encryption
,
171 Opt_checkpoint_disable
,
172 Opt_checkpoint_disable_cap
,
173 Opt_checkpoint_disable_cap_perc
,
174 Opt_checkpoint_enable
,
175 Opt_checkpoint_merge
,
176 Opt_nocheckpoint_merge
,
177 Opt_compress_algorithm
,
178 Opt_compress_log_size
,
179 Opt_compress_extension
,
180 Opt_nocompress_extension
,
189 Opt_age_extent_cache
,
194 static match_table_t f2fs_tokens
= {
195 {Opt_gc_background
, "background_gc=%s"},
196 {Opt_disable_roll_forward
, "disable_roll_forward"},
197 {Opt_norecovery
, "norecovery"},
198 {Opt_discard
, "discard"},
199 {Opt_nodiscard
, "nodiscard"},
200 {Opt_noheap
, "no_heap"},
202 {Opt_user_xattr
, "user_xattr"},
203 {Opt_nouser_xattr
, "nouser_xattr"},
205 {Opt_noacl
, "noacl"},
206 {Opt_active_logs
, "active_logs=%u"},
207 {Opt_disable_ext_identify
, "disable_ext_identify"},
208 {Opt_inline_xattr
, "inline_xattr"},
209 {Opt_noinline_xattr
, "noinline_xattr"},
210 {Opt_inline_xattr_size
, "inline_xattr_size=%u"},
211 {Opt_inline_data
, "inline_data"},
212 {Opt_inline_dentry
, "inline_dentry"},
213 {Opt_noinline_dentry
, "noinline_dentry"},
214 {Opt_flush_merge
, "flush_merge"},
215 {Opt_noflush_merge
, "noflush_merge"},
216 {Opt_barrier
, "barrier"},
217 {Opt_nobarrier
, "nobarrier"},
218 {Opt_fastboot
, "fastboot"},
219 {Opt_extent_cache
, "extent_cache"},
220 {Opt_noextent_cache
, "noextent_cache"},
221 {Opt_noinline_data
, "noinline_data"},
222 {Opt_data_flush
, "data_flush"},
223 {Opt_reserve_root
, "reserve_root=%u"},
224 {Opt_resgid
, "resgid=%u"},
225 {Opt_resuid
, "resuid=%u"},
226 {Opt_mode
, "mode=%s"},
227 {Opt_fault_injection
, "fault_injection=%u"},
228 {Opt_fault_type
, "fault_type=%u"},
229 {Opt_quota
, "quota"},
230 {Opt_noquota
, "noquota"},
231 {Opt_usrquota
, "usrquota"},
232 {Opt_grpquota
, "grpquota"},
233 {Opt_prjquota
, "prjquota"},
234 {Opt_usrjquota
, "usrjquota=%s"},
235 {Opt_grpjquota
, "grpjquota=%s"},
236 {Opt_prjjquota
, "prjjquota=%s"},
237 {Opt_offusrjquota
, "usrjquota="},
238 {Opt_offgrpjquota
, "grpjquota="},
239 {Opt_offprjjquota
, "prjjquota="},
240 {Opt_jqfmt_vfsold
, "jqfmt=vfsold"},
241 {Opt_jqfmt_vfsv0
, "jqfmt=vfsv0"},
242 {Opt_jqfmt_vfsv1
, "jqfmt=vfsv1"},
243 {Opt_alloc
, "alloc_mode=%s"},
244 {Opt_fsync
, "fsync_mode=%s"},
245 {Opt_test_dummy_encryption
, "test_dummy_encryption=%s"},
246 {Opt_test_dummy_encryption
, "test_dummy_encryption"},
247 {Opt_inlinecrypt
, "inlinecrypt"},
248 {Opt_checkpoint_disable
, "checkpoint=disable"},
249 {Opt_checkpoint_disable_cap
, "checkpoint=disable:%u"},
250 {Opt_checkpoint_disable_cap_perc
, "checkpoint=disable:%u%%"},
251 {Opt_checkpoint_enable
, "checkpoint=enable"},
252 {Opt_checkpoint_merge
, "checkpoint_merge"},
253 {Opt_nocheckpoint_merge
, "nocheckpoint_merge"},
254 {Opt_compress_algorithm
, "compress_algorithm=%s"},
255 {Opt_compress_log_size
, "compress_log_size=%u"},
256 {Opt_compress_extension
, "compress_extension=%s"},
257 {Opt_nocompress_extension
, "nocompress_extension=%s"},
258 {Opt_compress_chksum
, "compress_chksum"},
259 {Opt_compress_mode
, "compress_mode=%s"},
260 {Opt_compress_cache
, "compress_cache"},
262 {Opt_gc_merge
, "gc_merge"},
263 {Opt_nogc_merge
, "nogc_merge"},
264 {Opt_discard_unit
, "discard_unit=%s"},
265 {Opt_memory_mode
, "memory=%s"},
266 {Opt_age_extent_cache
, "age_extent_cache"},
267 {Opt_errors
, "errors=%s"},
271 void f2fs_printk(struct f2fs_sb_info
*sbi
, bool limit_rate
,
272 const char *fmt
, ...)
274 struct va_format vaf
;
280 level
= printk_get_level(fmt
);
281 vaf
.fmt
= printk_skip_level(fmt
);
284 printk_ratelimited("%c%cF2FS-fs (%s): %pV\n",
285 KERN_SOH_ASCII
, level
, sbi
->sb
->s_id
, &vaf
);
287 printk("%c%cF2FS-fs (%s): %pV\n",
288 KERN_SOH_ASCII
, level
, sbi
->sb
->s_id
, &vaf
);
293 #if IS_ENABLED(CONFIG_UNICODE)
294 static const struct f2fs_sb_encodings
{
297 unsigned int version
;
298 } f2fs_sb_encoding_map
[] = {
299 {F2FS_ENC_UTF8_12_1
, "utf8", UNICODE_AGE(12, 1, 0)},
302 static const struct f2fs_sb_encodings
*
303 f2fs_sb_read_encoding(const struct f2fs_super_block
*sb
)
305 __u16 magic
= le16_to_cpu(sb
->s_encoding
);
308 for (i
= 0; i
< ARRAY_SIZE(f2fs_sb_encoding_map
); i
++)
309 if (magic
== f2fs_sb_encoding_map
[i
].magic
)
310 return &f2fs_sb_encoding_map
[i
];
315 struct kmem_cache
*f2fs_cf_name_slab
;
316 static int __init
f2fs_create_casefold_cache(void)
318 f2fs_cf_name_slab
= f2fs_kmem_cache_create("f2fs_casefolded_name",
320 return f2fs_cf_name_slab
? 0 : -ENOMEM
;
323 static void f2fs_destroy_casefold_cache(void)
325 kmem_cache_destroy(f2fs_cf_name_slab
);
328 static int __init
f2fs_create_casefold_cache(void) { return 0; }
329 static void f2fs_destroy_casefold_cache(void) { }
332 static inline void limit_reserve_root(struct f2fs_sb_info
*sbi
)
334 block_t limit
= min((sbi
->user_block_count
>> 3),
335 sbi
->user_block_count
- sbi
->reserved_blocks
);
338 if (test_opt(sbi
, RESERVE_ROOT
) &&
339 F2FS_OPTION(sbi
).root_reserved_blocks
> limit
) {
340 F2FS_OPTION(sbi
).root_reserved_blocks
= limit
;
341 f2fs_info(sbi
, "Reduce reserved blocks for root = %u",
342 F2FS_OPTION(sbi
).root_reserved_blocks
);
344 if (!test_opt(sbi
, RESERVE_ROOT
) &&
345 (!uid_eq(F2FS_OPTION(sbi
).s_resuid
,
346 make_kuid(&init_user_ns
, F2FS_DEF_RESUID
)) ||
347 !gid_eq(F2FS_OPTION(sbi
).s_resgid
,
348 make_kgid(&init_user_ns
, F2FS_DEF_RESGID
))))
349 f2fs_info(sbi
, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
350 from_kuid_munged(&init_user_ns
,
351 F2FS_OPTION(sbi
).s_resuid
),
352 from_kgid_munged(&init_user_ns
,
353 F2FS_OPTION(sbi
).s_resgid
));
356 static inline void adjust_unusable_cap_perc(struct f2fs_sb_info
*sbi
)
358 if (!F2FS_OPTION(sbi
).unusable_cap_perc
)
361 if (F2FS_OPTION(sbi
).unusable_cap_perc
== 100)
362 F2FS_OPTION(sbi
).unusable_cap
= sbi
->user_block_count
;
364 F2FS_OPTION(sbi
).unusable_cap
= (sbi
->user_block_count
/ 100) *
365 F2FS_OPTION(sbi
).unusable_cap_perc
;
367 f2fs_info(sbi
, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
368 F2FS_OPTION(sbi
).unusable_cap
,
369 F2FS_OPTION(sbi
).unusable_cap_perc
);
372 static void init_once(void *foo
)
374 struct f2fs_inode_info
*fi
= (struct f2fs_inode_info
*) foo
;
376 inode_init_once(&fi
->vfs_inode
);
380 static const char * const quotatypes
[] = INITQFNAMES
;
381 #define QTYPE2NAME(t) (quotatypes[t])
382 static int f2fs_set_qf_name(struct super_block
*sb
, int qtype
,
385 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
389 if (sb_any_quota_loaded(sb
) && !F2FS_OPTION(sbi
).s_qf_names
[qtype
]) {
390 f2fs_err(sbi
, "Cannot change journaled quota options when quota turned on");
393 if (f2fs_sb_has_quota_ino(sbi
)) {
394 f2fs_info(sbi
, "QUOTA feature is enabled, so ignore qf_name");
398 qname
= match_strdup(args
);
400 f2fs_err(sbi
, "Not enough memory for storing quotafile name");
403 if (F2FS_OPTION(sbi
).s_qf_names
[qtype
]) {
404 if (strcmp(F2FS_OPTION(sbi
).s_qf_names
[qtype
], qname
) == 0)
407 f2fs_err(sbi
, "%s quota file already specified",
411 if (strchr(qname
, '/')) {
412 f2fs_err(sbi
, "quotafile must be on filesystem root");
415 F2FS_OPTION(sbi
).s_qf_names
[qtype
] = qname
;
423 static int f2fs_clear_qf_name(struct super_block
*sb
, int qtype
)
425 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
427 if (sb_any_quota_loaded(sb
) && F2FS_OPTION(sbi
).s_qf_names
[qtype
]) {
428 f2fs_err(sbi
, "Cannot change journaled quota options when quota turned on");
431 kfree(F2FS_OPTION(sbi
).s_qf_names
[qtype
]);
432 F2FS_OPTION(sbi
).s_qf_names
[qtype
] = NULL
;
436 static int f2fs_check_quota_options(struct f2fs_sb_info
*sbi
)
439 * We do the test below only for project quotas. 'usrquota' and
440 * 'grpquota' mount options are allowed even without quota feature
441 * to support legacy quotas in quota files.
443 if (test_opt(sbi
, PRJQUOTA
) && !f2fs_sb_has_project_quota(sbi
)) {
444 f2fs_err(sbi
, "Project quota feature not enabled. Cannot enable project quota enforcement.");
447 if (F2FS_OPTION(sbi
).s_qf_names
[USRQUOTA
] ||
448 F2FS_OPTION(sbi
).s_qf_names
[GRPQUOTA
] ||
449 F2FS_OPTION(sbi
).s_qf_names
[PRJQUOTA
]) {
450 if (test_opt(sbi
, USRQUOTA
) &&
451 F2FS_OPTION(sbi
).s_qf_names
[USRQUOTA
])
452 clear_opt(sbi
, USRQUOTA
);
454 if (test_opt(sbi
, GRPQUOTA
) &&
455 F2FS_OPTION(sbi
).s_qf_names
[GRPQUOTA
])
456 clear_opt(sbi
, GRPQUOTA
);
458 if (test_opt(sbi
, PRJQUOTA
) &&
459 F2FS_OPTION(sbi
).s_qf_names
[PRJQUOTA
])
460 clear_opt(sbi
, PRJQUOTA
);
462 if (test_opt(sbi
, GRPQUOTA
) || test_opt(sbi
, USRQUOTA
) ||
463 test_opt(sbi
, PRJQUOTA
)) {
464 f2fs_err(sbi
, "old and new quota format mixing");
468 if (!F2FS_OPTION(sbi
).s_jquota_fmt
) {
469 f2fs_err(sbi
, "journaled quota format not specified");
474 if (f2fs_sb_has_quota_ino(sbi
) && F2FS_OPTION(sbi
).s_jquota_fmt
) {
475 f2fs_info(sbi
, "QUOTA feature is enabled, so ignore jquota_fmt");
476 F2FS_OPTION(sbi
).s_jquota_fmt
= 0;
482 static int f2fs_set_test_dummy_encryption(struct super_block
*sb
,
484 const substring_t
*arg
,
487 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
488 struct fs_parameter param
= {
489 .type
= fs_value_is_string
,
490 .string
= arg
->from
? arg
->from
: "",
492 struct fscrypt_dummy_policy
*policy
=
493 &F2FS_OPTION(sbi
).dummy_enc_policy
;
496 if (!IS_ENABLED(CONFIG_FS_ENCRYPTION
)) {
497 f2fs_warn(sbi
, "test_dummy_encryption option not supported");
501 if (!f2fs_sb_has_encrypt(sbi
)) {
502 f2fs_err(sbi
, "Encrypt feature is off");
507 * This mount option is just for testing, and it's not worthwhile to
508 * implement the extra complexity (e.g. RCU protection) that would be
509 * needed to allow it to be set or changed during remount. We do allow
510 * it to be specified during remount, but only if there is no change.
512 if (is_remount
&& !fscrypt_is_dummy_policy_set(policy
)) {
513 f2fs_warn(sbi
, "Can't set test_dummy_encryption on remount");
517 err
= fscrypt_parse_test_dummy_encryption(¶m
, policy
);
521 "Can't change test_dummy_encryption on remount");
522 else if (err
== -EINVAL
)
523 f2fs_warn(sbi
, "Value of option \"%s\" is unrecognized",
526 f2fs_warn(sbi
, "Error processing option \"%s\" [%d]",
530 f2fs_warn(sbi
, "Test dummy encryption mode enabled");
534 #ifdef CONFIG_F2FS_FS_COMPRESSION
535 static bool is_compress_extension_exist(struct f2fs_sb_info
*sbi
,
536 const char *new_ext
, bool is_ext
)
538 unsigned char (*ext
)[F2FS_EXTENSION_LEN
];
543 ext
= F2FS_OPTION(sbi
).extensions
;
544 ext_cnt
= F2FS_OPTION(sbi
).compress_ext_cnt
;
546 ext
= F2FS_OPTION(sbi
).noextensions
;
547 ext_cnt
= F2FS_OPTION(sbi
).nocompress_ext_cnt
;
550 for (i
= 0; i
< ext_cnt
; i
++) {
551 if (!strcasecmp(new_ext
, ext
[i
]))
559 * 1. The same extension name cannot not appear in both compress and non-compress extension
561 * 2. If the compress extension specifies all files, the types specified by the non-compress
562 * extension will be treated as special cases and will not be compressed.
563 * 3. Don't allow the non-compress extension specifies all files.
565 static int f2fs_test_compress_extension(struct f2fs_sb_info
*sbi
)
567 unsigned char (*ext
)[F2FS_EXTENSION_LEN
];
568 unsigned char (*noext
)[F2FS_EXTENSION_LEN
];
569 int ext_cnt
, noext_cnt
, index
= 0, no_index
= 0;
571 ext
= F2FS_OPTION(sbi
).extensions
;
572 ext_cnt
= F2FS_OPTION(sbi
).compress_ext_cnt
;
573 noext
= F2FS_OPTION(sbi
).noextensions
;
574 noext_cnt
= F2FS_OPTION(sbi
).nocompress_ext_cnt
;
579 for (no_index
= 0; no_index
< noext_cnt
; no_index
++) {
580 if (!strcasecmp("*", noext
[no_index
])) {
581 f2fs_info(sbi
, "Don't allow the nocompress extension specifies all files");
584 for (index
= 0; index
< ext_cnt
; index
++) {
585 if (!strcasecmp(ext
[index
], noext
[no_index
])) {
586 f2fs_info(sbi
, "Don't allow the same extension %s appear in both compress and nocompress extension",
595 #ifdef CONFIG_F2FS_FS_LZ4
596 static int f2fs_set_lz4hc_level(struct f2fs_sb_info
*sbi
, const char *str
)
598 #ifdef CONFIG_F2FS_FS_LZ4HC
601 if (strlen(str
) == 3) {
602 F2FS_OPTION(sbi
).compress_level
= 0;
609 f2fs_info(sbi
, "wrong format, e.g. <alg_name>:<compr_level>");
612 if (kstrtouint(str
+ 1, 10, &level
))
615 if (!f2fs_is_compress_level_valid(COMPRESS_LZ4
, level
)) {
616 f2fs_info(sbi
, "invalid lz4hc compress level: %d", level
);
620 F2FS_OPTION(sbi
).compress_level
= level
;
623 if (strlen(str
) == 3) {
624 F2FS_OPTION(sbi
).compress_level
= 0;
627 f2fs_info(sbi
, "kernel doesn't support lz4hc compression");
633 #ifdef CONFIG_F2FS_FS_ZSTD
634 static int f2fs_set_zstd_level(struct f2fs_sb_info
*sbi
, const char *str
)
639 if (strlen(str
) == len
) {
640 F2FS_OPTION(sbi
).compress_level
= F2FS_ZSTD_DEFAULT_CLEVEL
;
647 f2fs_info(sbi
, "wrong format, e.g. <alg_name>:<compr_level>");
650 if (kstrtoint(str
+ 1, 10, &level
))
653 /* f2fs does not support negative compress level now */
655 f2fs_info(sbi
, "do not support negative compress level: %d", level
);
659 if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD
, level
)) {
660 f2fs_info(sbi
, "invalid zstd compress level: %d", level
);
664 F2FS_OPTION(sbi
).compress_level
= level
;
670 static int parse_options(struct super_block
*sb
, char *options
, bool is_remount
)
672 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
673 substring_t args
[MAX_OPT_ARGS
];
674 #ifdef CONFIG_F2FS_FS_COMPRESSION
675 unsigned char (*ext
)[F2FS_EXTENSION_LEN
];
676 unsigned char (*noext
)[F2FS_EXTENSION_LEN
];
677 int ext_cnt
, noext_cnt
;
688 while ((p
= strsep(&options
, ",")) != NULL
) {
694 * Initialize args struct so we know whether arg was
695 * found; some options take optional arguments.
697 args
[0].to
= args
[0].from
= NULL
;
698 token
= match_token(p
, f2fs_tokens
, args
);
701 case Opt_gc_background
:
702 name
= match_strdup(&args
[0]);
706 if (!strcmp(name
, "on")) {
707 F2FS_OPTION(sbi
).bggc_mode
= BGGC_MODE_ON
;
708 } else if (!strcmp(name
, "off")) {
709 if (f2fs_sb_has_blkzoned(sbi
)) {
710 f2fs_warn(sbi
, "zoned devices need bggc");
714 F2FS_OPTION(sbi
).bggc_mode
= BGGC_MODE_OFF
;
715 } else if (!strcmp(name
, "sync")) {
716 F2FS_OPTION(sbi
).bggc_mode
= BGGC_MODE_SYNC
;
723 case Opt_disable_roll_forward
:
724 set_opt(sbi
, DISABLE_ROLL_FORWARD
);
727 /* this option mounts f2fs with ro */
728 set_opt(sbi
, NORECOVERY
);
729 if (!f2fs_readonly(sb
))
733 if (!f2fs_hw_support_discard(sbi
)) {
734 f2fs_warn(sbi
, "device does not support discard");
737 set_opt(sbi
, DISCARD
);
740 if (f2fs_hw_should_discard(sbi
)) {
741 f2fs_warn(sbi
, "discard is required for zoned block devices");
744 clear_opt(sbi
, DISCARD
);
748 f2fs_warn(sbi
, "heap/no_heap options were deprecated");
750 #ifdef CONFIG_F2FS_FS_XATTR
752 set_opt(sbi
, XATTR_USER
);
754 case Opt_nouser_xattr
:
755 clear_opt(sbi
, XATTR_USER
);
757 case Opt_inline_xattr
:
758 set_opt(sbi
, INLINE_XATTR
);
760 case Opt_noinline_xattr
:
761 clear_opt(sbi
, INLINE_XATTR
);
763 case Opt_inline_xattr_size
:
764 if (args
->from
&& match_int(args
, &arg
))
766 set_opt(sbi
, INLINE_XATTR_SIZE
);
767 F2FS_OPTION(sbi
).inline_xattr_size
= arg
;
771 f2fs_info(sbi
, "user_xattr options not supported");
773 case Opt_nouser_xattr
:
774 f2fs_info(sbi
, "nouser_xattr options not supported");
776 case Opt_inline_xattr
:
777 f2fs_info(sbi
, "inline_xattr options not supported");
779 case Opt_noinline_xattr
:
780 f2fs_info(sbi
, "noinline_xattr options not supported");
783 #ifdef CONFIG_F2FS_FS_POSIX_ACL
785 set_opt(sbi
, POSIX_ACL
);
788 clear_opt(sbi
, POSIX_ACL
);
792 f2fs_info(sbi
, "acl options not supported");
795 f2fs_info(sbi
, "noacl options not supported");
798 case Opt_active_logs
:
799 if (args
->from
&& match_int(args
, &arg
))
801 if (arg
!= 2 && arg
!= 4 &&
802 arg
!= NR_CURSEG_PERSIST_TYPE
)
804 F2FS_OPTION(sbi
).active_logs
= arg
;
806 case Opt_disable_ext_identify
:
807 set_opt(sbi
, DISABLE_EXT_IDENTIFY
);
809 case Opt_inline_data
:
810 set_opt(sbi
, INLINE_DATA
);
812 case Opt_inline_dentry
:
813 set_opt(sbi
, INLINE_DENTRY
);
815 case Opt_noinline_dentry
:
816 clear_opt(sbi
, INLINE_DENTRY
);
818 case Opt_flush_merge
:
819 set_opt(sbi
, FLUSH_MERGE
);
821 case Opt_noflush_merge
:
822 clear_opt(sbi
, FLUSH_MERGE
);
825 set_opt(sbi
, NOBARRIER
);
828 clear_opt(sbi
, NOBARRIER
);
831 set_opt(sbi
, FASTBOOT
);
833 case Opt_extent_cache
:
834 set_opt(sbi
, READ_EXTENT_CACHE
);
836 case Opt_noextent_cache
:
837 clear_opt(sbi
, READ_EXTENT_CACHE
);
839 case Opt_noinline_data
:
840 clear_opt(sbi
, INLINE_DATA
);
843 set_opt(sbi
, DATA_FLUSH
);
845 case Opt_reserve_root
:
846 if (args
->from
&& match_int(args
, &arg
))
848 if (test_opt(sbi
, RESERVE_ROOT
)) {
849 f2fs_info(sbi
, "Preserve previous reserve_root=%u",
850 F2FS_OPTION(sbi
).root_reserved_blocks
);
852 F2FS_OPTION(sbi
).root_reserved_blocks
= arg
;
853 set_opt(sbi
, RESERVE_ROOT
);
857 if (args
->from
&& match_int(args
, &arg
))
859 uid
= make_kuid(current_user_ns(), arg
);
860 if (!uid_valid(uid
)) {
861 f2fs_err(sbi
, "Invalid uid value %d", arg
);
864 F2FS_OPTION(sbi
).s_resuid
= uid
;
867 if (args
->from
&& match_int(args
, &arg
))
869 gid
= make_kgid(current_user_ns(), arg
);
870 if (!gid_valid(gid
)) {
871 f2fs_err(sbi
, "Invalid gid value %d", arg
);
874 F2FS_OPTION(sbi
).s_resgid
= gid
;
877 name
= match_strdup(&args
[0]);
881 if (!strcmp(name
, "adaptive")) {
882 F2FS_OPTION(sbi
).fs_mode
= FS_MODE_ADAPTIVE
;
883 } else if (!strcmp(name
, "lfs")) {
884 F2FS_OPTION(sbi
).fs_mode
= FS_MODE_LFS
;
885 } else if (!strcmp(name
, "fragment:segment")) {
886 F2FS_OPTION(sbi
).fs_mode
= FS_MODE_FRAGMENT_SEG
;
887 } else if (!strcmp(name
, "fragment:block")) {
888 F2FS_OPTION(sbi
).fs_mode
= FS_MODE_FRAGMENT_BLK
;
895 #ifdef CONFIG_F2FS_FAULT_INJECTION
896 case Opt_fault_injection
:
897 if (args
->from
&& match_int(args
, &arg
))
899 if (f2fs_build_fault_attr(sbi
, arg
,
900 F2FS_ALL_FAULT_TYPE
))
902 set_opt(sbi
, FAULT_INJECTION
);
906 if (args
->from
&& match_int(args
, &arg
))
908 if (f2fs_build_fault_attr(sbi
, 0, arg
))
910 set_opt(sbi
, FAULT_INJECTION
);
913 case Opt_fault_injection
:
914 f2fs_info(sbi
, "fault_injection options not supported");
918 f2fs_info(sbi
, "fault_type options not supported");
924 set_opt(sbi
, USRQUOTA
);
927 set_opt(sbi
, GRPQUOTA
);
930 set_opt(sbi
, PRJQUOTA
);
933 ret
= f2fs_set_qf_name(sb
, USRQUOTA
, &args
[0]);
938 ret
= f2fs_set_qf_name(sb
, GRPQUOTA
, &args
[0]);
943 ret
= f2fs_set_qf_name(sb
, PRJQUOTA
, &args
[0]);
947 case Opt_offusrjquota
:
948 ret
= f2fs_clear_qf_name(sb
, USRQUOTA
);
952 case Opt_offgrpjquota
:
953 ret
= f2fs_clear_qf_name(sb
, GRPQUOTA
);
957 case Opt_offprjjquota
:
958 ret
= f2fs_clear_qf_name(sb
, PRJQUOTA
);
962 case Opt_jqfmt_vfsold
:
963 F2FS_OPTION(sbi
).s_jquota_fmt
= QFMT_VFS_OLD
;
965 case Opt_jqfmt_vfsv0
:
966 F2FS_OPTION(sbi
).s_jquota_fmt
= QFMT_VFS_V0
;
968 case Opt_jqfmt_vfsv1
:
969 F2FS_OPTION(sbi
).s_jquota_fmt
= QFMT_VFS_V1
;
972 clear_opt(sbi
, QUOTA
);
973 clear_opt(sbi
, USRQUOTA
);
974 clear_opt(sbi
, GRPQUOTA
);
975 clear_opt(sbi
, PRJQUOTA
);
985 case Opt_offusrjquota
:
986 case Opt_offgrpjquota
:
987 case Opt_offprjjquota
:
988 case Opt_jqfmt_vfsold
:
989 case Opt_jqfmt_vfsv0
:
990 case Opt_jqfmt_vfsv1
:
992 f2fs_info(sbi
, "quota operations not supported");
996 name
= match_strdup(&args
[0]);
1000 if (!strcmp(name
, "default")) {
1001 F2FS_OPTION(sbi
).alloc_mode
= ALLOC_MODE_DEFAULT
;
1002 } else if (!strcmp(name
, "reuse")) {
1003 F2FS_OPTION(sbi
).alloc_mode
= ALLOC_MODE_REUSE
;
1011 name
= match_strdup(&args
[0]);
1014 if (!strcmp(name
, "posix")) {
1015 F2FS_OPTION(sbi
).fsync_mode
= FSYNC_MODE_POSIX
;
1016 } else if (!strcmp(name
, "strict")) {
1017 F2FS_OPTION(sbi
).fsync_mode
= FSYNC_MODE_STRICT
;
1018 } else if (!strcmp(name
, "nobarrier")) {
1019 F2FS_OPTION(sbi
).fsync_mode
=
1020 FSYNC_MODE_NOBARRIER
;
1027 case Opt_test_dummy_encryption
:
1028 ret
= f2fs_set_test_dummy_encryption(sb
, p
, &args
[0],
1033 case Opt_inlinecrypt
:
1034 #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
1035 sb
->s_flags
|= SB_INLINECRYPT
;
1037 f2fs_info(sbi
, "inline encryption not supported");
1040 case Opt_checkpoint_disable_cap_perc
:
1041 if (args
->from
&& match_int(args
, &arg
))
1043 if (arg
< 0 || arg
> 100)
1045 F2FS_OPTION(sbi
).unusable_cap_perc
= arg
;
1046 set_opt(sbi
, DISABLE_CHECKPOINT
);
1048 case Opt_checkpoint_disable_cap
:
1049 if (args
->from
&& match_int(args
, &arg
))
1051 F2FS_OPTION(sbi
).unusable_cap
= arg
;
1052 set_opt(sbi
, DISABLE_CHECKPOINT
);
1054 case Opt_checkpoint_disable
:
1055 set_opt(sbi
, DISABLE_CHECKPOINT
);
1057 case Opt_checkpoint_enable
:
1058 clear_opt(sbi
, DISABLE_CHECKPOINT
);
1060 case Opt_checkpoint_merge
:
1061 set_opt(sbi
, MERGE_CHECKPOINT
);
1063 case Opt_nocheckpoint_merge
:
1064 clear_opt(sbi
, MERGE_CHECKPOINT
);
1066 #ifdef CONFIG_F2FS_FS_COMPRESSION
1067 case Opt_compress_algorithm
:
1068 if (!f2fs_sb_has_compression(sbi
)) {
1069 f2fs_info(sbi
, "Image doesn't support compression");
1072 name
= match_strdup(&args
[0]);
1075 if (!strcmp(name
, "lzo")) {
1076 #ifdef CONFIG_F2FS_FS_LZO
1077 F2FS_OPTION(sbi
).compress_level
= 0;
1078 F2FS_OPTION(sbi
).compress_algorithm
=
1081 f2fs_info(sbi
, "kernel doesn't support lzo compression");
1083 } else if (!strncmp(name
, "lz4", 3)) {
1084 #ifdef CONFIG_F2FS_FS_LZ4
1085 ret
= f2fs_set_lz4hc_level(sbi
, name
);
1090 F2FS_OPTION(sbi
).compress_algorithm
=
1093 f2fs_info(sbi
, "kernel doesn't support lz4 compression");
1095 } else if (!strncmp(name
, "zstd", 4)) {
1096 #ifdef CONFIG_F2FS_FS_ZSTD
1097 ret
= f2fs_set_zstd_level(sbi
, name
);
1102 F2FS_OPTION(sbi
).compress_algorithm
=
1105 f2fs_info(sbi
, "kernel doesn't support zstd compression");
1107 } else if (!strcmp(name
, "lzo-rle")) {
1108 #ifdef CONFIG_F2FS_FS_LZORLE
1109 F2FS_OPTION(sbi
).compress_level
= 0;
1110 F2FS_OPTION(sbi
).compress_algorithm
=
1113 f2fs_info(sbi
, "kernel doesn't support lzorle compression");
1121 case Opt_compress_log_size
:
1122 if (!f2fs_sb_has_compression(sbi
)) {
1123 f2fs_info(sbi
, "Image doesn't support compression");
1126 if (args
->from
&& match_int(args
, &arg
))
1128 if (arg
< MIN_COMPRESS_LOG_SIZE
||
1129 arg
> MAX_COMPRESS_LOG_SIZE
) {
1131 "Compress cluster log size is out of range");
1134 F2FS_OPTION(sbi
).compress_log_size
= arg
;
1136 case Opt_compress_extension
:
1137 if (!f2fs_sb_has_compression(sbi
)) {
1138 f2fs_info(sbi
, "Image doesn't support compression");
1141 name
= match_strdup(&args
[0]);
1145 ext
= F2FS_OPTION(sbi
).extensions
;
1146 ext_cnt
= F2FS_OPTION(sbi
).compress_ext_cnt
;
1148 if (strlen(name
) >= F2FS_EXTENSION_LEN
||
1149 ext_cnt
>= COMPRESS_EXT_NUM
) {
1151 "invalid extension length/number");
1156 if (is_compress_extension_exist(sbi
, name
, true)) {
1161 strcpy(ext
[ext_cnt
], name
);
1162 F2FS_OPTION(sbi
).compress_ext_cnt
++;
1165 case Opt_nocompress_extension
:
1166 if (!f2fs_sb_has_compression(sbi
)) {
1167 f2fs_info(sbi
, "Image doesn't support compression");
1170 name
= match_strdup(&args
[0]);
1174 noext
= F2FS_OPTION(sbi
).noextensions
;
1175 noext_cnt
= F2FS_OPTION(sbi
).nocompress_ext_cnt
;
1177 if (strlen(name
) >= F2FS_EXTENSION_LEN
||
1178 noext_cnt
>= COMPRESS_EXT_NUM
) {
1180 "invalid extension length/number");
1185 if (is_compress_extension_exist(sbi
, name
, false)) {
1190 strcpy(noext
[noext_cnt
], name
);
1191 F2FS_OPTION(sbi
).nocompress_ext_cnt
++;
1194 case Opt_compress_chksum
:
1195 if (!f2fs_sb_has_compression(sbi
)) {
1196 f2fs_info(sbi
, "Image doesn't support compression");
1199 F2FS_OPTION(sbi
).compress_chksum
= true;
1201 case Opt_compress_mode
:
1202 if (!f2fs_sb_has_compression(sbi
)) {
1203 f2fs_info(sbi
, "Image doesn't support compression");
1206 name
= match_strdup(&args
[0]);
1209 if (!strcmp(name
, "fs")) {
1210 F2FS_OPTION(sbi
).compress_mode
= COMPR_MODE_FS
;
1211 } else if (!strcmp(name
, "user")) {
1212 F2FS_OPTION(sbi
).compress_mode
= COMPR_MODE_USER
;
1219 case Opt_compress_cache
:
1220 if (!f2fs_sb_has_compression(sbi
)) {
1221 f2fs_info(sbi
, "Image doesn't support compression");
1224 set_opt(sbi
, COMPRESS_CACHE
);
1227 case Opt_compress_algorithm
:
1228 case Opt_compress_log_size
:
1229 case Opt_compress_extension
:
1230 case Opt_nocompress_extension
:
1231 case Opt_compress_chksum
:
1232 case Opt_compress_mode
:
1233 case Opt_compress_cache
:
1234 f2fs_info(sbi
, "compression options not supported");
1241 set_opt(sbi
, GC_MERGE
);
1243 case Opt_nogc_merge
:
1244 clear_opt(sbi
, GC_MERGE
);
1246 case Opt_discard_unit
:
1247 name
= match_strdup(&args
[0]);
1250 if (!strcmp(name
, "block")) {
1251 F2FS_OPTION(sbi
).discard_unit
=
1253 } else if (!strcmp(name
, "segment")) {
1254 F2FS_OPTION(sbi
).discard_unit
=
1255 DISCARD_UNIT_SEGMENT
;
1256 } else if (!strcmp(name
, "section")) {
1257 F2FS_OPTION(sbi
).discard_unit
=
1258 DISCARD_UNIT_SECTION
;
1265 case Opt_memory_mode
:
1266 name
= match_strdup(&args
[0]);
1269 if (!strcmp(name
, "normal")) {
1270 F2FS_OPTION(sbi
).memory_mode
=
1272 } else if (!strcmp(name
, "low")) {
1273 F2FS_OPTION(sbi
).memory_mode
=
1281 case Opt_age_extent_cache
:
1282 set_opt(sbi
, AGE_EXTENT_CACHE
);
1285 name
= match_strdup(&args
[0]);
1288 if (!strcmp(name
, "remount-ro")) {
1289 F2FS_OPTION(sbi
).errors
=
1290 MOUNT_ERRORS_READONLY
;
1291 } else if (!strcmp(name
, "continue")) {
1292 F2FS_OPTION(sbi
).errors
=
1293 MOUNT_ERRORS_CONTINUE
;
1294 } else if (!strcmp(name
, "panic")) {
1295 F2FS_OPTION(sbi
).errors
=
1304 f2fs_err(sbi
, "Unrecognized mount option \"%s\" or missing value",
1311 if (f2fs_check_quota_options(sbi
))
1314 if (f2fs_sb_has_quota_ino(sbi
) && !f2fs_readonly(sbi
->sb
)) {
1315 f2fs_info(sbi
, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
1318 if (f2fs_sb_has_project_quota(sbi
) && !f2fs_readonly(sbi
->sb
)) {
1319 f2fs_err(sbi
, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
1324 if (!IS_ENABLED(CONFIG_UNICODE
) && f2fs_sb_has_casefold(sbi
)) {
1326 "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
1331 * The BLKZONED feature indicates that the drive was formatted with
1332 * zone alignment optimization. This is optional for host-aware
1333 * devices, but mandatory for host-managed zoned block devices.
1335 if (f2fs_sb_has_blkzoned(sbi
)) {
1336 #ifdef CONFIG_BLK_DEV_ZONED
1337 if (F2FS_OPTION(sbi
).discard_unit
!=
1338 DISCARD_UNIT_SECTION
) {
1339 f2fs_info(sbi
, "Zoned block device doesn't need small discard, set discard_unit=section by default");
1340 F2FS_OPTION(sbi
).discard_unit
=
1341 DISCARD_UNIT_SECTION
;
1344 if (F2FS_OPTION(sbi
).fs_mode
!= FS_MODE_LFS
) {
1345 f2fs_info(sbi
, "Only lfs mode is allowed with zoned block device feature");
1349 f2fs_err(sbi
, "Zoned block device support is not enabled");
1354 #ifdef CONFIG_F2FS_FS_COMPRESSION
1355 if (f2fs_test_compress_extension(sbi
)) {
1356 f2fs_err(sbi
, "invalid compress or nocompress extension");
1361 if (test_opt(sbi
, INLINE_XATTR_SIZE
)) {
1362 int min_size
, max_size
;
1364 if (!f2fs_sb_has_extra_attr(sbi
) ||
1365 !f2fs_sb_has_flexible_inline_xattr(sbi
)) {
1366 f2fs_err(sbi
, "extra_attr or flexible_inline_xattr feature is off");
1369 if (!test_opt(sbi
, INLINE_XATTR
)) {
1370 f2fs_err(sbi
, "inline_xattr_size option should be set with inline_xattr option");
1374 min_size
= MIN_INLINE_XATTR_SIZE
;
1375 max_size
= MAX_INLINE_XATTR_SIZE
;
1377 if (F2FS_OPTION(sbi
).inline_xattr_size
< min_size
||
1378 F2FS_OPTION(sbi
).inline_xattr_size
> max_size
) {
1379 f2fs_err(sbi
, "inline xattr size is out of range: %d ~ %d",
1380 min_size
, max_size
);
1385 if (test_opt(sbi
, ATGC
) && f2fs_lfs_mode(sbi
)) {
1386 f2fs_err(sbi
, "LFS is not compatible with ATGC");
1390 if (f2fs_is_readonly(sbi
) && test_opt(sbi
, FLUSH_MERGE
)) {
1391 f2fs_err(sbi
, "FLUSH_MERGE not compatible with readonly mode");
1395 if (f2fs_sb_has_readonly(sbi
) && !f2fs_readonly(sbi
->sb
)) {
1396 f2fs_err(sbi
, "Allow to mount readonly mode only");
1402 static struct inode
*f2fs_alloc_inode(struct super_block
*sb
)
1404 struct f2fs_inode_info
*fi
;
1406 if (time_to_inject(F2FS_SB(sb
), FAULT_SLAB_ALLOC
))
1409 fi
= alloc_inode_sb(sb
, f2fs_inode_cachep
, GFP_F2FS_ZERO
);
1413 init_once((void *) fi
);
1415 /* Initialize f2fs-specific inode info */
1416 atomic_set(&fi
->dirty_pages
, 0);
1417 atomic_set(&fi
->i_compr_blocks
, 0);
1418 init_f2fs_rwsem(&fi
->i_sem
);
1419 spin_lock_init(&fi
->i_size_lock
);
1420 INIT_LIST_HEAD(&fi
->dirty_list
);
1421 INIT_LIST_HEAD(&fi
->gdirty_list
);
1422 init_f2fs_rwsem(&fi
->i_gc_rwsem
[READ
]);
1423 init_f2fs_rwsem(&fi
->i_gc_rwsem
[WRITE
]);
1424 init_f2fs_rwsem(&fi
->i_xattr_sem
);
1426 /* Will be used by directory only */
1427 fi
->i_dir_level
= F2FS_SB(sb
)->dir_level
;
1429 return &fi
->vfs_inode
;
1432 static int f2fs_drop_inode(struct inode
*inode
)
1434 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
1438 * during filesystem shutdown, if checkpoint is disabled,
1439 * drop useless meta/node dirty pages.
1441 if (unlikely(is_sbi_flag_set(sbi
, SBI_CP_DISABLED
))) {
1442 if (inode
->i_ino
== F2FS_NODE_INO(sbi
) ||
1443 inode
->i_ino
== F2FS_META_INO(sbi
)) {
1444 trace_f2fs_drop_inode(inode
, 1);
1450 * This is to avoid a deadlock condition like below.
1451 * writeback_single_inode(inode)
1452 * - f2fs_write_data_page
1453 * - f2fs_gc -> iput -> evict
1454 * - inode_wait_for_writeback(inode)
1456 if ((!inode_unhashed(inode
) && inode
->i_state
& I_SYNC
)) {
1457 if (!inode
->i_nlink
&& !is_bad_inode(inode
)) {
1458 /* to avoid evict_inode call simultaneously */
1459 atomic_inc(&inode
->i_count
);
1460 spin_unlock(&inode
->i_lock
);
1462 /* should remain fi->extent_tree for writepage */
1463 f2fs_destroy_extent_node(inode
);
1465 sb_start_intwrite(inode
->i_sb
);
1466 f2fs_i_size_write(inode
, 0);
1468 f2fs_submit_merged_write_cond(F2FS_I_SB(inode
),
1469 inode
, NULL
, 0, DATA
);
1470 truncate_inode_pages_final(inode
->i_mapping
);
1472 if (F2FS_HAS_BLOCKS(inode
))
1473 f2fs_truncate(inode
);
1475 sb_end_intwrite(inode
->i_sb
);
1477 spin_lock(&inode
->i_lock
);
1478 atomic_dec(&inode
->i_count
);
1480 trace_f2fs_drop_inode(inode
, 0);
1483 ret
= generic_drop_inode(inode
);
1485 ret
= fscrypt_drop_inode(inode
);
1486 trace_f2fs_drop_inode(inode
, ret
);
1490 int f2fs_inode_dirtied(struct inode
*inode
, bool sync
)
1492 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
1495 spin_lock(&sbi
->inode_lock
[DIRTY_META
]);
1496 if (is_inode_flag_set(inode
, FI_DIRTY_INODE
)) {
1499 set_inode_flag(inode
, FI_DIRTY_INODE
);
1500 stat_inc_dirty_inode(sbi
, DIRTY_META
);
1502 if (sync
&& list_empty(&F2FS_I(inode
)->gdirty_list
)) {
1503 list_add_tail(&F2FS_I(inode
)->gdirty_list
,
1504 &sbi
->inode_list
[DIRTY_META
]);
1505 inc_page_count(sbi
, F2FS_DIRTY_IMETA
);
1507 spin_unlock(&sbi
->inode_lock
[DIRTY_META
]);
1511 void f2fs_inode_synced(struct inode
*inode
)
1513 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
1515 spin_lock(&sbi
->inode_lock
[DIRTY_META
]);
1516 if (!is_inode_flag_set(inode
, FI_DIRTY_INODE
)) {
1517 spin_unlock(&sbi
->inode_lock
[DIRTY_META
]);
1520 if (!list_empty(&F2FS_I(inode
)->gdirty_list
)) {
1521 list_del_init(&F2FS_I(inode
)->gdirty_list
);
1522 dec_page_count(sbi
, F2FS_DIRTY_IMETA
);
1524 clear_inode_flag(inode
, FI_DIRTY_INODE
);
1525 clear_inode_flag(inode
, FI_AUTO_RECOVER
);
1526 stat_dec_dirty_inode(F2FS_I_SB(inode
), DIRTY_META
);
1527 spin_unlock(&sbi
->inode_lock
[DIRTY_META
]);
1531 * f2fs_dirty_inode() is called from __mark_inode_dirty()
1533 * We should call set_dirty_inode to write the dirty inode through write_inode.
1535 static void f2fs_dirty_inode(struct inode
*inode
, int flags
)
1537 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
1539 if (inode
->i_ino
== F2FS_NODE_INO(sbi
) ||
1540 inode
->i_ino
== F2FS_META_INO(sbi
))
1543 if (is_inode_flag_set(inode
, FI_AUTO_RECOVER
))
1544 clear_inode_flag(inode
, FI_AUTO_RECOVER
);
1546 f2fs_inode_dirtied(inode
, false);
1549 static void f2fs_free_inode(struct inode
*inode
)
1551 fscrypt_free_inode(inode
);
1552 kmem_cache_free(f2fs_inode_cachep
, F2FS_I(inode
));
1555 static void destroy_percpu_info(struct f2fs_sb_info
*sbi
)
1557 percpu_counter_destroy(&sbi
->total_valid_inode_count
);
1558 percpu_counter_destroy(&sbi
->rf_node_block_count
);
1559 percpu_counter_destroy(&sbi
->alloc_valid_block_count
);
1562 static void destroy_device_list(struct f2fs_sb_info
*sbi
)
1566 for (i
= 0; i
< sbi
->s_ndevs
; i
++) {
1568 bdev_fput(FDEV(i
).bdev_file
);
1569 #ifdef CONFIG_BLK_DEV_ZONED
1570 kvfree(FDEV(i
).blkz_seq
);
1576 static void f2fs_put_super(struct super_block
*sb
)
1578 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
1583 /* unregister procfs/sysfs entries in advance to avoid race case */
1584 f2fs_unregister_sysfs(sbi
);
1586 f2fs_quota_off_umount(sb
);
1588 /* prevent remaining shrinker jobs */
1589 mutex_lock(&sbi
->umount_mutex
);
1592 * flush all issued checkpoints and stop checkpoint issue thread.
1593 * after then, all checkpoints should be done by each process context.
1595 f2fs_stop_ckpt_thread(sbi
);
1598 * We don't need to do checkpoint when superblock is clean.
1599 * But, the previous checkpoint was not done by umount, it needs to do
1600 * clean checkpoint again.
1602 if ((is_sbi_flag_set(sbi
, SBI_IS_DIRTY
) ||
1603 !is_set_ckpt_flags(sbi
, CP_UMOUNT_FLAG
))) {
1604 struct cp_control cpc
= {
1605 .reason
= CP_UMOUNT
,
1607 stat_inc_cp_call_count(sbi
, TOTAL_CALL
);
1608 err
= f2fs_write_checkpoint(sbi
, &cpc
);
1611 /* be sure to wait for any on-going discard commands */
1612 done
= f2fs_issue_discard_timeout(sbi
);
1613 if (f2fs_realtime_discard_enable(sbi
) && !sbi
->discard_blks
&& done
) {
1614 struct cp_control cpc
= {
1615 .reason
= CP_UMOUNT
| CP_TRIMMED
,
1617 stat_inc_cp_call_count(sbi
, TOTAL_CALL
);
1618 err
= f2fs_write_checkpoint(sbi
, &cpc
);
1622 * normally superblock is clean, so we need to release this.
1623 * In addition, EIO will skip do checkpoint, we need this as well.
1625 f2fs_release_ino_entry(sbi
, true);
1627 f2fs_leave_shrinker(sbi
);
1628 mutex_unlock(&sbi
->umount_mutex
);
1630 /* our cp_error case, we can wait for any writeback page */
1631 f2fs_flush_merged_writes(sbi
);
1633 f2fs_wait_on_all_pages(sbi
, F2FS_WB_CP_DATA
);
1635 if (err
|| f2fs_cp_error(sbi
)) {
1636 truncate_inode_pages_final(NODE_MAPPING(sbi
));
1637 truncate_inode_pages_final(META_MAPPING(sbi
));
1640 for (i
= 0; i
< NR_COUNT_TYPE
; i
++) {
1641 if (!get_pages(sbi
, i
))
1643 f2fs_err(sbi
, "detect filesystem reference count leak during "
1644 "umount, type: %d, count: %lld", i
, get_pages(sbi
, i
));
1645 f2fs_bug_on(sbi
, 1);
1648 f2fs_bug_on(sbi
, sbi
->fsync_node_num
);
1650 f2fs_destroy_compress_inode(sbi
);
1652 iput(sbi
->node_inode
);
1653 sbi
->node_inode
= NULL
;
1655 iput(sbi
->meta_inode
);
1656 sbi
->meta_inode
= NULL
;
1659 * iput() can update stat information, if f2fs_write_checkpoint()
1660 * above failed with error.
1662 f2fs_destroy_stats(sbi
);
1664 /* destroy f2fs internal modules */
1665 f2fs_destroy_node_manager(sbi
);
1666 f2fs_destroy_segment_manager(sbi
);
1668 /* flush s_error_work before sbi destroy */
1669 flush_work(&sbi
->s_error_work
);
1671 f2fs_destroy_post_read_wq(sbi
);
1675 if (sbi
->s_chksum_driver
)
1676 crypto_free_shash(sbi
->s_chksum_driver
);
1677 kfree(sbi
->raw_super
);
1679 f2fs_destroy_page_array_cache(sbi
);
1680 f2fs_destroy_xattr_caches(sbi
);
1682 for (i
= 0; i
< MAXQUOTAS
; i
++)
1683 kfree(F2FS_OPTION(sbi
).s_qf_names
[i
]);
1685 fscrypt_free_dummy_policy(&F2FS_OPTION(sbi
).dummy_enc_policy
);
1686 destroy_percpu_info(sbi
);
1687 f2fs_destroy_iostat(sbi
);
1688 for (i
= 0; i
< NR_PAGE_TYPE
; i
++)
1689 kvfree(sbi
->write_io
[i
]);
1690 #if IS_ENABLED(CONFIG_UNICODE)
1691 utf8_unload(sb
->s_encoding
);
1695 int f2fs_sync_fs(struct super_block
*sb
, int sync
)
1697 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
1700 if (unlikely(f2fs_cp_error(sbi
)))
1702 if (unlikely(is_sbi_flag_set(sbi
, SBI_CP_DISABLED
)))
1705 trace_f2fs_sync_fs(sb
, sync
);
1707 if (unlikely(is_sbi_flag_set(sbi
, SBI_POR_DOING
)))
1711 stat_inc_cp_call_count(sbi
, TOTAL_CALL
);
1712 err
= f2fs_issue_checkpoint(sbi
);
1718 static int f2fs_freeze(struct super_block
*sb
)
1720 if (f2fs_readonly(sb
))
1723 /* IO error happened before */
1724 if (unlikely(f2fs_cp_error(F2FS_SB(sb
))))
1727 /* must be clean, since sync_filesystem() was already called */
1728 if (is_sbi_flag_set(F2FS_SB(sb
), SBI_IS_DIRTY
))
1731 /* Let's flush checkpoints and stop the thread. */
1732 f2fs_flush_ckpt_thread(F2FS_SB(sb
));
1734 /* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
1735 set_sbi_flag(F2FS_SB(sb
), SBI_IS_FREEZING
);
1739 static int f2fs_unfreeze(struct super_block
*sb
)
1741 clear_sbi_flag(F2FS_SB(sb
), SBI_IS_FREEZING
);
1746 static int f2fs_statfs_project(struct super_block
*sb
,
1747 kprojid_t projid
, struct kstatfs
*buf
)
1750 struct dquot
*dquot
;
1754 qid
= make_kqid_projid(projid
);
1755 dquot
= dqget(sb
, qid
);
1757 return PTR_ERR(dquot
);
1758 spin_lock(&dquot
->dq_dqb_lock
);
1760 limit
= min_not_zero(dquot
->dq_dqb
.dqb_bsoftlimit
,
1761 dquot
->dq_dqb
.dqb_bhardlimit
);
1763 limit
>>= sb
->s_blocksize_bits
;
1765 if (limit
&& buf
->f_blocks
> limit
) {
1766 curblock
= (dquot
->dq_dqb
.dqb_curspace
+
1767 dquot
->dq_dqb
.dqb_rsvspace
) >> sb
->s_blocksize_bits
;
1768 buf
->f_blocks
= limit
;
1769 buf
->f_bfree
= buf
->f_bavail
=
1770 (buf
->f_blocks
> curblock
) ?
1771 (buf
->f_blocks
- curblock
) : 0;
1774 limit
= min_not_zero(dquot
->dq_dqb
.dqb_isoftlimit
,
1775 dquot
->dq_dqb
.dqb_ihardlimit
);
1777 if (limit
&& buf
->f_files
> limit
) {
1778 buf
->f_files
= limit
;
1780 (buf
->f_files
> dquot
->dq_dqb
.dqb_curinodes
) ?
1781 (buf
->f_files
- dquot
->dq_dqb
.dqb_curinodes
) : 0;
1784 spin_unlock(&dquot
->dq_dqb_lock
);
1790 static int f2fs_statfs(struct dentry
*dentry
, struct kstatfs
*buf
)
1792 struct super_block
*sb
= dentry
->d_sb
;
1793 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
1794 u64 id
= huge_encode_dev(sb
->s_bdev
->bd_dev
);
1795 block_t total_count
, user_block_count
, start_count
;
1796 u64 avail_node_count
;
1797 unsigned int total_valid_node_count
;
1799 total_count
= le64_to_cpu(sbi
->raw_super
->block_count
);
1800 start_count
= le32_to_cpu(sbi
->raw_super
->segment0_blkaddr
);
1801 buf
->f_type
= F2FS_SUPER_MAGIC
;
1802 buf
->f_bsize
= sbi
->blocksize
;
1804 buf
->f_blocks
= total_count
- start_count
;
1806 spin_lock(&sbi
->stat_lock
);
1808 user_block_count
= sbi
->user_block_count
;
1809 total_valid_node_count
= valid_node_count(sbi
);
1810 avail_node_count
= sbi
->total_node_count
- F2FS_RESERVED_NODE_NUM
;
1811 buf
->f_bfree
= user_block_count
- valid_user_blocks(sbi
) -
1812 sbi
->current_reserved_blocks
;
1814 if (unlikely(buf
->f_bfree
<= sbi
->unusable_block_count
))
1817 buf
->f_bfree
-= sbi
->unusable_block_count
;
1818 spin_unlock(&sbi
->stat_lock
);
1820 if (buf
->f_bfree
> F2FS_OPTION(sbi
).root_reserved_blocks
)
1821 buf
->f_bavail
= buf
->f_bfree
-
1822 F2FS_OPTION(sbi
).root_reserved_blocks
;
1826 if (avail_node_count
> user_block_count
) {
1827 buf
->f_files
= user_block_count
;
1828 buf
->f_ffree
= buf
->f_bavail
;
1830 buf
->f_files
= avail_node_count
;
1831 buf
->f_ffree
= min(avail_node_count
- total_valid_node_count
,
1835 buf
->f_namelen
= F2FS_NAME_LEN
;
1836 buf
->f_fsid
= u64_to_fsid(id
);
1839 if (is_inode_flag_set(dentry
->d_inode
, FI_PROJ_INHERIT
) &&
1840 sb_has_quota_limits_enabled(sb
, PRJQUOTA
)) {
1841 f2fs_statfs_project(sb
, F2FS_I(dentry
->d_inode
)->i_projid
, buf
);
1847 static inline void f2fs_show_quota_options(struct seq_file
*seq
,
1848 struct super_block
*sb
)
1851 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
1853 if (F2FS_OPTION(sbi
).s_jquota_fmt
) {
1856 switch (F2FS_OPTION(sbi
).s_jquota_fmt
) {
1867 seq_printf(seq
, ",jqfmt=%s", fmtname
);
1870 if (F2FS_OPTION(sbi
).s_qf_names
[USRQUOTA
])
1871 seq_show_option(seq
, "usrjquota",
1872 F2FS_OPTION(sbi
).s_qf_names
[USRQUOTA
]);
1874 if (F2FS_OPTION(sbi
).s_qf_names
[GRPQUOTA
])
1875 seq_show_option(seq
, "grpjquota",
1876 F2FS_OPTION(sbi
).s_qf_names
[GRPQUOTA
]);
1878 if (F2FS_OPTION(sbi
).s_qf_names
[PRJQUOTA
])
1879 seq_show_option(seq
, "prjjquota",
1880 F2FS_OPTION(sbi
).s_qf_names
[PRJQUOTA
]);
1884 #ifdef CONFIG_F2FS_FS_COMPRESSION
1885 static inline void f2fs_show_compress_options(struct seq_file
*seq
,
1886 struct super_block
*sb
)
1888 struct f2fs_sb_info
*sbi
= F2FS_SB(sb
);
1892 if (!f2fs_sb_has_compression(sbi
))
1895 switch (F2FS_OPTION(sbi
).compress_algorithm
) {
1905 case COMPRESS_LZORLE
:
1906 algtype
= "lzo-rle";
1909 seq_printf(seq
, ",compress_algorithm=%s", algtype
);
1911 if (F2FS_OPTION(sbi
).compress_level
)
1912 seq_printf(seq
, ":%d", F2FS_OPTION(sbi
).compress_level
);
1914 seq_printf(seq
, ",compress_log_size=%u",
1915 F2FS_OPTION(sbi
).compress_log_size
);
1917 for (i
= 0; i
< F2FS_OPTION(sbi
).compress_ext_cnt
; i
++) {
1918 seq_printf(seq
, ",compress_extension=%s",
1919 F2FS_OPTION(sbi
).extensions
[i
]);
1922 for (i
= 0; i
< F2FS_OPTION(sbi
).nocompress_ext_cnt
; i
++) {
1923 seq_printf(seq
, ",nocompress_extension=%s",
1924 F2FS_OPTION(sbi
).noextensions
[i
]);
1927 if (F2FS_OPTION(sbi
).compress_chksum
)
1928 seq_puts(seq
, ",compress_chksum");
1930 if (F2FS_OPTION(sbi
).compress_mode
== COMPR_MODE_FS
)
1931 seq_printf(seq
, ",compress_mode=%s", "fs");
1932 else if (F2FS_OPTION(sbi
).compress_mode
== COMPR_MODE_USER
)
1933 seq_printf(seq
, ",compress_mode=%s", "user");
1935 if (test_opt(sbi
, COMPRESS_CACHE
))
1936 seq_puts(seq
, ",compress_cache");
1940 static int f2fs_show_options(struct seq_file
*seq
, struct dentry
*root
)
1942 struct f2fs_sb_info
*sbi
= F2FS_SB(root
->d_sb
);
1944 if (F2FS_OPTION(sbi
).bggc_mode
== BGGC_MODE_SYNC
)
1945 seq_printf(seq
, ",background_gc=%s", "sync");
1946 else if (F2FS_OPTION(sbi
).bggc_mode
== BGGC_MODE_ON
)
1947 seq_printf(seq
, ",background_gc=%s", "on");
1948 else if (F2FS_OPTION(sbi
).bggc_mode
== BGGC_MODE_OFF
)
1949 seq_printf(seq
, ",background_gc=%s", "off");
1951 if (test_opt(sbi
, GC_MERGE
))
1952 seq_puts(seq
, ",gc_merge");
1954 seq_puts(seq
, ",nogc_merge");
1956 if (test_opt(sbi
, DISABLE_ROLL_FORWARD
))
1957 seq_puts(seq
, ",disable_roll_forward");
1958 if (test_opt(sbi
, NORECOVERY
))
1959 seq_puts(seq
, ",norecovery");
1960 if (test_opt(sbi
, DISCARD
)) {
1961 seq_puts(seq
, ",discard");
1962 if (F2FS_OPTION(sbi
).discard_unit
== DISCARD_UNIT_BLOCK
)
1963 seq_printf(seq
, ",discard_unit=%s", "block");
1964 else if (F2FS_OPTION(sbi
).discard_unit
== DISCARD_UNIT_SEGMENT
)
1965 seq_printf(seq
, ",discard_unit=%s", "segment");
1966 else if (F2FS_OPTION(sbi
).discard_unit
== DISCARD_UNIT_SECTION
)
1967 seq_printf(seq
, ",discard_unit=%s", "section");
1969 seq_puts(seq
, ",nodiscard");
1971 #ifdef CONFIG_F2FS_FS_XATTR
1972 if (test_opt(sbi
, XATTR_USER
))
1973 seq_puts(seq
, ",user_xattr");
1975 seq_puts(seq
, ",nouser_xattr");
1976 if (test_opt(sbi
, INLINE_XATTR
))
1977 seq_puts(seq
, ",inline_xattr");
1979 seq_puts(seq
, ",noinline_xattr");
1980 if (test_opt(sbi
, INLINE_XATTR_SIZE
))
1981 seq_printf(seq
, ",inline_xattr_size=%u",
1982 F2FS_OPTION(sbi
).inline_xattr_size
);
1984 #ifdef CONFIG_F2FS_FS_POSIX_ACL
1985 if (test_opt(sbi
, POSIX_ACL
))
1986 seq_puts(seq
, ",acl");
1988 seq_puts(seq
, ",noacl");
1990 if (test_opt(sbi
, DISABLE_EXT_IDENTIFY
))
1991 seq_puts(seq
, ",disable_ext_identify");
1992 if (test_opt(sbi
, INLINE_DATA
))
1993 seq_puts(seq
, ",inline_data");
1995 seq_puts(seq
, ",noinline_data");
1996 if (test_opt(sbi
, INLINE_DENTRY
))
1997 seq_puts(seq
, ",inline_dentry");
1999 seq_puts(seq
, ",noinline_dentry");
2000 if (test_opt(sbi
, FLUSH_MERGE
))
2001 seq_puts(seq
, ",flush_merge");
2003 seq_puts(seq
, ",noflush_merge");
2004 if (test_opt(sbi
, NOBARRIER
))
2005 seq_puts(seq
, ",nobarrier");
2007 seq_puts(seq
, ",barrier");
2008 if (test_opt(sbi
, FASTBOOT
))
2009 seq_puts(seq
, ",fastboot");
2010 if (test_opt(sbi
, READ_EXTENT_CACHE
))
2011 seq_puts(seq
, ",extent_cache");
2013 seq_puts(seq
, ",noextent_cache");
2014 if (test_opt(sbi
, AGE_EXTENT_CACHE
))
2015 seq_puts(seq
, ",age_extent_cache");
2016 if (test_opt(sbi
, DATA_FLUSH
))
2017 seq_puts(seq
, ",data_flush");
2019 seq_puts(seq
, ",mode=");
2020 if (F2FS_OPTION(sbi
).fs_mode
== FS_MODE_ADAPTIVE
)
2021 seq_puts(seq
, "adaptive");
2022 else if (F2FS_OPTION(sbi
).fs_mode
== FS_MODE_LFS
)
2023 seq_puts(seq
, "lfs");
2024 else if (F2FS_OPTION(sbi
).fs_mode
== FS_MODE_FRAGMENT_SEG
)
2025 seq_puts(seq
, "fragment:segment");
2026 else if (F2FS_OPTION(sbi
).fs_mode
== FS_MODE_FRAGMENT_BLK
)
2027 seq_puts(seq
, "fragment:block");
2028 seq_printf(seq
, ",active_logs=%u", F2FS_OPTION(sbi
).active_logs
);
2029 if (test_opt(sbi
, RESERVE_ROOT
))
2030 seq_printf(seq
, ",reserve_root=%u,resuid=%u,resgid=%u",
2031 F2FS_OPTION(sbi
).root_reserved_blocks
,
2032 from_kuid_munged(&init_user_ns
,
2033 F2FS_OPTION(sbi
).s_resuid
),
2034 from_kgid_munged(&init_user_ns
,
2035 F2FS_OPTION(sbi
).s_resgid
));
2036 #ifdef CONFIG_F2FS_FAULT_INJECTION
2037 if (test_opt(sbi
, FAULT_INJECTION
)) {
2038 seq_printf(seq
, ",fault_injection=%u",
2039 F2FS_OPTION(sbi
).fault_info
.inject_rate
);
2040 seq_printf(seq
, ",fault_type=%u",
2041 F2FS_OPTION(sbi
).fault_info
.inject_type
);
2045 if (test_opt(sbi
, QUOTA
))
2046 seq_puts(seq
, ",quota");
2047 if (test_opt(sbi
, USRQUOTA
))
2048 seq_puts(seq
, ",usrquota");
2049 if (test_opt(sbi
, GRPQUOTA
))
2050 seq_puts(seq
, ",grpquota");
2051 if (test_opt(sbi
, PRJQUOTA
))
2052 seq_puts(seq
, ",prjquota");
2054 f2fs_show_quota_options(seq
, sbi
->sb
);
2056 fscrypt_show_test_dummy_encryption(seq
, ',', sbi
->sb
);
2058 if (sbi
->sb
->s_flags
& SB_INLINECRYPT
)
2059 seq_puts(seq
, ",inlinecrypt");
2061 if (F2FS_OPTION(sbi
).alloc_mode
== ALLOC_MODE_DEFAULT
)
2062 seq_printf(seq
, ",alloc_mode=%s", "default");
2063 else if (F2FS_OPTION(sbi
).alloc_mode
== ALLOC_MODE_REUSE
)
2064 seq_printf(seq
, ",alloc_mode=%s", "reuse");
2066 if (test_opt(sbi
, DISABLE_CHECKPOINT
))
2067 seq_printf(seq
, ",checkpoint=disable:%u",
2068 F2FS_OPTION(sbi
).unusable_cap
);
2069 if (test_opt(sbi
, MERGE_CHECKPOINT
))
2070 seq_puts(seq
, ",checkpoint_merge");
2072 seq_puts(seq
, ",nocheckpoint_merge");
2073 if (F2FS_OPTION(sbi
).fsync_mode
== FSYNC_MODE_POSIX
)
2074 seq_printf(seq
, ",fsync_mode=%s", "posix");
2075 else if (F2FS_OPTION(sbi
).fsync_mode
== FSYNC_MODE_STRICT
)
2076 seq_printf(seq
, ",fsync_mode=%s", "strict");
2077 else if (F2FS_OPTION(sbi
).fsync_mode
== FSYNC_MODE_NOBARRIER
)
2078 seq_printf(seq
, ",fsync_mode=%s", "nobarrier");
2080 #ifdef CONFIG_F2FS_FS_COMPRESSION
2081 f2fs_show_compress_options(seq
, sbi
->sb
);
2084 if (test_opt(sbi
, ATGC
))
2085 seq_puts(seq
, ",atgc");
2087 if (F2FS_OPTION(sbi
).memory_mode
== MEMORY_MODE_NORMAL
)
2088 seq_printf(seq
, ",memory=%s", "normal");
2089 else if (F2FS_OPTION(sbi
).memory_mode
== MEMORY_MODE_LOW
)
2090 seq_printf(seq
, ",memory=%s", "low");
2092 if (F2FS_OPTION(sbi
).errors
== MOUNT_ERRORS_READONLY
)
2093 seq_printf(seq
, ",errors=%s", "remount-ro");
2094 else if (F2FS_OPTION(sbi
).errors
== MOUNT_ERRORS_CONTINUE
)
2095 seq_printf(seq
, ",errors=%s", "continue");
2096 else if (F2FS_OPTION(sbi
).errors
== MOUNT_ERRORS_PANIC
)
2097 seq_printf(seq
, ",errors=%s", "panic");
2102 static void default_options(struct f2fs_sb_info
*sbi
, bool remount
)
2104 /* init some FS parameters */
2106 set_opt(sbi
, READ_EXTENT_CACHE
);
2107 clear_opt(sbi
, DISABLE_CHECKPOINT
);
2109 if (f2fs_hw_support_discard(sbi
) || f2fs_hw_should_discard(sbi
))
2110 set_opt(sbi
, DISCARD
);
2112 if (f2fs_sb_has_blkzoned(sbi
))
2113 F2FS_OPTION(sbi
).discard_unit
= DISCARD_UNIT_SECTION
;
2115 F2FS_OPTION(sbi
).discard_unit
= DISCARD_UNIT_BLOCK
;
2118 if (f2fs_sb_has_readonly(sbi
))
2119 F2FS_OPTION(sbi
).active_logs
= NR_CURSEG_RO_TYPE
;
2121 F2FS_OPTION(sbi
).active_logs
= NR_CURSEG_PERSIST_TYPE
;
2123 F2FS_OPTION(sbi
).inline_xattr_size
= DEFAULT_INLINE_XATTR_ADDRS
;
2124 if (le32_to_cpu(F2FS_RAW_SUPER(sbi
)->segment_count_main
) <=
2125 SMALL_VOLUME_SEGMENTS
)
2126 F2FS_OPTION(sbi
).alloc_mode
= ALLOC_MODE_REUSE
;
2128 F2FS_OPTION(sbi
).alloc_mode
= ALLOC_MODE_DEFAULT
;
2129 F2FS_OPTION(sbi
).fsync_mode
= FSYNC_MODE_POSIX
;
2130 F2FS_OPTION(sbi
).s_resuid
= make_kuid(&init_user_ns
, F2FS_DEF_RESUID
);
2131 F2FS_OPTION(sbi
).s_resgid
= make_kgid(&init_user_ns
, F2FS_DEF_RESGID
);
2132 if (f2fs_sb_has_compression(sbi
)) {
2133 F2FS_OPTION(sbi
).compress_algorithm
= COMPRESS_LZ4
;
2134 F2FS_OPTION(sbi
).compress_log_size
= MIN_COMPRESS_LOG_SIZE
;
2135 F2FS_OPTION(sbi
).compress_ext_cnt
= 0;
2136 F2FS_OPTION(sbi
).compress_mode
= COMPR_MODE_FS
;
2138 F2FS_OPTION(sbi
).bggc_mode
= BGGC_MODE_ON
;
2139 F2FS_OPTION(sbi
).memory_mode
= MEMORY_MODE_NORMAL
;
2140 F2FS_OPTION(sbi
).errors
= MOUNT_ERRORS_CONTINUE
;
2142 set_opt(sbi
, INLINE_XATTR
);
2143 set_opt(sbi
, INLINE_DATA
);
2144 set_opt(sbi
, INLINE_DENTRY
);
2145 set_opt(sbi
, MERGE_CHECKPOINT
);
2146 F2FS_OPTION(sbi
).unusable_cap
= 0;
2147 sbi
->sb
->s_flags
|= SB_LAZYTIME
;
2148 if (!f2fs_is_readonly(sbi
))
2149 set_opt(sbi
, FLUSH_MERGE
);
2150 if (f2fs_sb_has_blkzoned(sbi
))
2151 F2FS_OPTION(sbi
).fs_mode
= FS_MODE_LFS
;
2153 F2FS_OPTION(sbi
).fs_mode
= FS_MODE_ADAPTIVE
;
2155 #ifdef CONFIG_F2FS_FS_XATTR
2156 set_opt(sbi
, XATTR_USER
);
2158 #ifdef CONFIG_F2FS_FS_POSIX_ACL
2159 set_opt(sbi
, POSIX_ACL
);
2162 f2fs_build_fault_attr(sbi
, 0, 0);
2166 static int f2fs_enable_quotas(struct super_block
*sb
);
2169 static int f2fs_disable_checkpoint(struct f2fs_sb_info
*sbi
)
2171 unsigned int s_flags
= sbi
->sb
->s_flags
;
2172 struct cp_control cpc
;
2173 unsigned int gc_mode
= sbi
->gc_mode
;
2178 if (s_flags
& SB_RDONLY
) {
2179 f2fs_err(sbi
, "checkpoint=disable on readonly fs");
2182 sbi
->sb
->s_flags
|= SB_ACTIVE
;
2184 /* check if we need more GC first */
2185 unusable
= f2fs_get_unusable_blocks(sbi
);
2186 if (!f2fs_disable_cp_again(sbi
, unusable
))
2189 f2fs_update_time(sbi
, DISABLE_TIME
);
2191 sbi
->gc_mode
= GC_URGENT_HIGH
;
2193 while (!f2fs_time_over(sbi
, DISABLE_TIME
)) {
2194 struct f2fs_gc_control gc_control
= {
2195 .victim_segno
= NULL_SEGNO
,
2196 .init_gc_type
= FG_GC
,
2197 .should_migrate_blocks
= false,
2198 .err_gc_skipped
= true,
2200 .nr_free_secs
= 1 };
2202 f2fs_down_write(&sbi
->gc_lock
);
2203 stat_inc_gc_call_count(sbi
, FOREGROUND
);
2204 err
= f2fs_gc(sbi
, &gc_control
);
2205 if (err
== -ENODATA
) {
2209 if (err
&& err
!= -EAGAIN
)
2213 ret
= sync_filesystem(sbi
->sb
);
2215 err
= ret
? ret
: err
;
2219 unusable
= f2fs_get_unusable_blocks(sbi
);
2220 if (f2fs_disable_cp_again(sbi
, unusable
)) {
2226 f2fs_down_write(&sbi
->gc_lock
);
2227 cpc
.reason
= CP_PAUSE
;
2228 set_sbi_flag(sbi
, SBI_CP_DISABLED
);
2229 stat_inc_cp_call_count(sbi
, TOTAL_CALL
);
2230 err
= f2fs_write_checkpoint(sbi
, &cpc
);
2234 spin_lock(&sbi
->stat_lock
);
2235 sbi
->unusable_block_count
= unusable
;
2236 spin_unlock(&sbi
->stat_lock
);
2239 f2fs_up_write(&sbi
->gc_lock
);
2241 sbi
->gc_mode
= gc_mode
;
2242 sbi
->sb
->s_flags
= s_flags
; /* Restore SB_RDONLY status */
2246 static void f2fs_enable_checkpoint(struct f2fs_sb_info
*sbi
)
2248 int retry
= DEFAULT_RETRY_IO_COUNT
;
2250 /* we should flush all the data to keep data consistency */
2252 sync_inodes_sb(sbi
->sb
);
2253 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT
);
2254 } while (get_pages(sbi
, F2FS_DIRTY_DATA
) && retry
--);
2256 if (unlikely(retry
< 0))
2257 f2fs_warn(sbi
, "checkpoint=enable has some unwritten data.");
2259 f2fs_down_write(&sbi
->gc_lock
);
2260 f2fs_dirty_to_prefree(sbi
);
2262 clear_sbi_flag(sbi
, SBI_CP_DISABLED
);
2263 set_sbi_flag(sbi
, SBI_IS_DIRTY
);
2264 f2fs_up_write(&sbi
->gc_lock
);
2266 f2fs_sync_fs(sbi
->sb
, 1);
2268 /* Let's ensure there's no pending checkpoint anymore */
2269 f2fs_flush_ckpt_thread(sbi
);
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err;
	bool need_restart_gc = false, need_stop_gc = false;
	bool need_restart_flush = false, need_stop_flush = false;
	bool need_restart_discard = false, need_stop_discard = false;
	bool need_enable_checkpoint = false, need_disable_checkpoint = false;
	bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE);
	bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE);
	bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
	bool no_atgc = !test_opt(sbi, ATGC);
	bool no_discard = !test_opt(sbi, DISCARD);
	bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
	bool block_unit_discard = f2fs_block_unit_discard(sbi);
#ifdef CONFIG_QUOTA
	int i, j;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;

#ifdef CONFIG_QUOTA
	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			org_mount_opt.s_qf_names[i] =
				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
				GFP_KERNEL);
			if (!org_mount_opt.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(org_mount_opt.s_qf_names[j]);
				return -ENOMEM;
			}
		} else {
			org_mount_opt.s_qf_names[i] = NULL;
		}
	}
#endif

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
			  err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi, true);

	/* parse mount options */
	err = parse_options(sb, data, true);
	if (err)
		goto restore_opts;

#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi) &&
		sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) {
		f2fs_err(sbi,
			"zoned: max open zones %u is too small, need at least %u open zones",
			sbi->max_open_zones, F2FS_OPTION(sbi).active_logs);
		err = -EINVAL;
		goto restore_opts;
	}
#endif

	/* flush outstanding errors before changing fs state */
	flush_work(&sbi->s_error_work);

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
		goto skip;

	if (f2fs_dev_is_readonly(sbi) && !(*flags & SB_RDONLY)) {
		err = -EROFS;
		goto restore_opts;
	}

#ifdef CONFIG_QUOTA
	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
		/* dquot_resume needs RW */
		sb->s_flags &= ~SB_RDONLY;
		if (sb_any_quota_suspended(sb)) {
			dquot_resume(sb, -1);
		} else if (f2fs_sb_has_quota_ino(sbi)) {
			err = f2fs_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif
	if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) {
		err = -EINVAL;
		f2fs_warn(sbi, "LFS is not compatible with IPU");
		goto restore_opts;
	}

	/* disallow enable atgc dynamically */
	if (no_atgc == !!test_opt(sbi, ATGC)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch atgc option is not allowed");
		goto restore_opts;
	}

	/* disallow enable/disable extent_cache dynamically */
	if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch extent_cache option is not allowed");
		goto restore_opts;
	}
	/* disallow enable/disable age extent_cache dynamically */
	if (no_age_extent_cache == !!test_opt(sbi, AGE_EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch age_extent_cache option is not allowed");
		goto restore_opts;
	}

	if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch compress_cache option is not allowed");
		goto restore_opts;
	}

	if (block_unit_discard != f2fs_block_unit_discard(sbi)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch discard_unit option is not allowed");
		goto restore_opts;
	}

	if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = -EINVAL;
		f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & SB_RDONLY) ||
			(F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
			!test_opt(sbi, GC_MERGE))) {
		if (sbi->gc_thread) {
			f2fs_stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & SB_RDONLY) {
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/*
	 * We stop issue flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
		need_restart_flush = true;
	} else {
		err = f2fs_create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
		need_stop_flush = true;
	}

	if (no_discard == !!test_opt(sbi, DISCARD)) {
		if (test_opt(sbi, DISCARD)) {
			err = f2fs_start_discard_thread(sbi);
			if (err)
				goto restore_flush;
			need_stop_discard = true;
		} else {
			f2fs_stop_discard_thread(sbi);
			f2fs_issue_discard_timeout(sbi);
			need_restart_discard = true;
		}
	}

	if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) {
		if (test_opt(sbi, DISABLE_CHECKPOINT)) {
			err = f2fs_disable_checkpoint(sbi);
			if (err)
				goto restore_discard;
			need_enable_checkpoint = true;
		} else {
			f2fs_enable_checkpoint(sbi);
			need_disable_checkpoint = true;
		}
	}

	/*
	 * Place this routine at the end, since a new checkpoint would be
	 * triggered while remount and we need to take care of it before
	 * returning from remount.
	 */
	if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
			!test_opt(sbi, MERGE_CHECKPOINT)) {
		f2fs_stop_ckpt_thread(sbi);
	} else {
		/* Flush if the prevous checkpoint, if exists. */
		f2fs_flush_ckpt_thread(sbi);

		err = f2fs_start_ckpt_thread(sbi);
		if (err) {
			f2fs_err(sbi,
			    "Failed to start F2FS issue_checkpoint_thread (%d)",
			    err);
			goto restore_checkpoint;
		}
	}

skip:
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(org_mount_opt.s_qf_names[i]);
#endif
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);

	limit_reserve_root(sbi);
	adjust_unusable_cap_perc(sbi);
	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
	return 0;
restore_checkpoint:
	if (need_enable_checkpoint) {
		f2fs_enable_checkpoint(sbi);
	} else if (need_disable_checkpoint) {
		if (f2fs_disable_checkpoint(sbi))
			f2fs_warn(sbi, "checkpoint has not been disabled");
	}
restore_discard:
	if (need_restart_discard) {
		if (f2fs_start_discard_thread(sbi))
			f2fs_warn(sbi, "discard has been stopped");
	} else if (need_stop_discard) {
		f2fs_stop_discard_thread(sbi);
	}
restore_flush:
	if (need_restart_flush) {
		if (f2fs_create_flush_cmd_control(sbi))
			f2fs_warn(sbi, "background flush thread has stopped");
	} else if (need_stop_flush) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
	}
restore_gc:
	if (need_restart_gc) {
		if (f2fs_start_gc_thread(sbi))
			f2fs_warn(sbi, "background gc thread has stopped");
	} else if (need_stop_gc) {
		f2fs_stop_gc_thread(sbi);
	}
restore_opts:
#ifdef CONFIG_QUOTA
	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
	}
#endif
	sbi->mount_opt = org_mount_opt;
	sb->s_flags = old_sb_flags;
	return err;
}
static void f2fs_shutdown(struct super_block *sb)
{
	f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false, false);
}
static bool f2fs_need_recovery(struct f2fs_sb_info *sbi)
{
	/* need to recovery orphan */
	if (is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
		return true;

	/* need to recovery data */
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		return false;
	if (test_opt(sbi, NORECOVERY))
		return false;
	return !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG);
}
static bool f2fs_recover_quota_begin(struct f2fs_sb_info *sbi)
{
	bool readonly = f2fs_readonly(sbi->sb);

	if (!f2fs_need_recovery(sbi))
		return false;

	/* it doesn't need to check f2fs_sb_has_readonly() */
	if (f2fs_hw_is_readonly(sbi))
		return false;

	if (readonly) {
		sbi->sb->s_flags &= ~SB_RDONLY;
		set_sbi_flag(sbi, SBI_IS_WRITABLE);
	}

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	return f2fs_enable_quota_files(sbi, readonly);
}
static void f2fs_recover_quota_end(struct f2fs_sb_info *sbi,
						bool quota_enabled)
{
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);

	if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) {
		clear_sbi_flag(sbi, SBI_IS_WRITABLE);
		sbi->sb->s_flags |= SB_RDONLY;
	}
}
#ifdef CONFIG_QUOTA
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto repeat;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return PTR_ERR(page);
		}

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return -EIO;
		}

		memcpy_from_page(data, page, offset, tocopy);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}
/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct folio *folio;
	void *fsdata = NULL;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy,
							&folio, &fsdata);
		if (unlikely(err)) {
			if (err == -ENOMEM) {
				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
				goto retry;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			break;
		}

		memcpy_to_folio(folio, offset_in_folio(folio, off), data, tocopy);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						folio, fsdata);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return err;
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}
int f2fs_dquot_initialize(struct inode *inode)
{
	if (time_to_inject(F2FS_I_SB(inode), FAULT_DQUOT_INIT))
		return -ESRCH;

	return dquot_initialize(inode);
}

static struct dquot __rcu **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}
static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
		return 0;
	}

	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
					F2FS_OPTION(sbi).s_jquota_fmt, type);
}
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
{
	int enabled = 0;
	int i, err;

	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
		err = f2fs_enable_quotas(sbi->sb);
		if (err) {
			f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
			return 0;
		}
		return 1;
	}

	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			err = f2fs_quota_on_mount(sbi, i);
			if (!err) {
				enabled = 1;
				continue;
			}
			f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
				 err, i);
		}
	}
	return enabled;
}
static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	struct inode *qf_inode;
	unsigned long qf_inum;
	unsigned long qf_flag = F2FS_QUOTA_DEFAULT_FL;
	int err;

	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));

	qf_inum = f2fs_qf_ino(sb, type);
	if (!qf_inum)
		return -EPERM;

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	inode_lock(qf_inode);
	qf_inode->i_flags |= S_NOQUOTA;

	if ((F2FS_I(qf_inode)->i_flags & qf_flag) != qf_flag) {
		F2FS_I(qf_inode)->i_flags |= qf_flag;
		f2fs_set_inode_flags(qf_inode);
	}
	inode_unlock(qf_inode);

	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
	iput(qf_inode);
	return err;
}
static int f2fs_enable_quotas(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int type, err = 0;
	unsigned long qf_inum;
	bool quota_mopt[MAXQUOTAS] = {
		test_opt(sbi, USRQUOTA),
		test_opt(sbi, GRPQUOTA),
		test_opt(sbi, PRJQUOTA),
	};

	if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota file may be corrupted, skip loading it");
		return 0;
	}

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;

	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
		if (qf_inum) {
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
					 type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				set_sbi_flag(F2FS_SB(sb),
						SBI_QUOTA_NEED_REPAIR);
				return err;
			}
		}
	}
	return 0;
}
static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
{
	struct quota_info *dqopt = sb_dqopt(sbi->sb);
	struct address_space *mapping = dqopt->files[type]->i_mapping;
	int ret = 0;

	ret = dquot_writeback_dquots(sbi->sb, type);
	if (ret)
		goto out;

	ret = filemap_fdatawrite(mapping);
	if (ret)
		goto out;

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		goto out;

	ret = filemap_fdatawait(mapping);

	truncate_inode_pages(&dqopt->files[type]->i_data, 0);
out:
	if (ret)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}
int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret = 0;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {

		if (type != -1 && cnt != type)
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		if (!f2fs_sb_has_quota_ino(sbi))
			inode_lock(dqopt->files[cnt]);

		/*
		 * do_quotactl
		 *  f2fs_quota_sync
		 *  f2fs_down_read(quota_sem)
		 *  dquot_writeback_dquots()
		 *  f2fs_dquot_commit
		 *			      block_operation
		 *			      f2fs_down_read(quota_sem)
		 */
		f2fs_lock_op(sbi);
		f2fs_down_read(&sbi->quota_sem);

		ret = f2fs_quota_sync_file(sbi, cnt);

		f2fs_up_read(&sbi->quota_sem);
		f2fs_unlock_op(sbi);

		if (!f2fs_sb_has_quota_ino(sbi))
			inode_unlock(dqopt->files[cnt]);

		if (ret)
			break;
	}
	return ret;
}
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;
	int err;

	/* if quota sysfile exists, deny enabling quota with specific file */
	if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
		f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
		return -EBUSY;
	}

	if (path->dentry->d_sb != sb)
		return -EXDEV;

	err = f2fs_quota_sync(sb, type);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	err = filemap_fdatawrite(inode->i_mapping);
	if (err)
		return err;

	err = filemap_fdatawait(inode->i_mapping);
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode_lock(inode);
	F2FS_I(inode)->i_flags |= F2FS_QUOTA_DEFAULT_FL;
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}
static int __f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	err = f2fs_quota_sync(sb, type);
	if (err)
		goto out_put;

	err = dquot_quota_off(sb, type);
	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~F2FS_QUOTA_DEFAULT_FL;
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}
static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err;

	err = __f2fs_quota_off(sb, type);

	/*
	 * quotactl can shutdown journalled quota, result in inconsistence
	 * between quota record and fs data by following updates, tag the
	 * flag to let fsck be aware of it.
	 */
	if (is_journalled_quota(sbi))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);

	return err;
}
void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;
	int err;

	for (type = 0; type < MAXQUOTAS; type++) {
		err = __f2fs_quota_off(sb, type);
		if (err) {
			int ret = dquot_quota_off(sb, type);

			f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
				 type, err, ret);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
		}
	}
	/*
	 * In case of checkpoint=disable, we must flush quota blocks.
	 * This can cause NULL exception for node_inode in end_io, since
	 * put_super already dropped it.
	 */
	sync_filesystem(sb);
}
static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	for (type = 0; type < MAXQUOTAS; type++) {
		if (!dqopt->files[type])
			continue;
		f2fs_inode_synced(dqopt->files[type]);
	}
}
static int f2fs_dquot_commit(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
	ret = dquot_commit(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	f2fs_up_read(&sbi->quota_sem);
	return ret;
}

static int f2fs_dquot_acquire(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	f2fs_down_read(&sbi->quota_sem);
	ret = dquot_acquire(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	f2fs_up_read(&sbi->quota_sem);
	return ret;
}

static int f2fs_dquot_release(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret = dquot_release(dquot);

	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}

static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int ret = dquot_mark_dquot_dirty(dquot);

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);

	return ret;
}

static int f2fs_dquot_commit_info(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int ret = dquot_commit_info(sb, type);

	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}

static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}
static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space	= f2fs_get_reserved_space,
	.write_dquot		= f2fs_dquot_commit,
	.acquire_dquot		= f2fs_dquot_acquire,
	.release_dquot		= f2fs_dquot_release,
	.mark_dirty		= f2fs_dquot_mark_dquot_dirty,
	.write_info		= f2fs_dquot_commit_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= f2fs_get_projid,
	.get_next_id		= dquot_get_next_id,
};

static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
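/*
 * Illustrative usage (not part of this file): with the quota_ino feature,
 * quota tracking is enabled from the hidden quota inodes at mount time, e.g.
 *
 *	mkfs.f2fs -O extra_attr,quota /dev/sdX		(hypothetical device)
 *	mount -o usrquota,grpquota,prjquota /dev/sdX /mnt
 *
 * The usrquota/grpquota/prjquota options select which quota types also get
 * limits enforcement (see quota_mopt[] in f2fs_enable_quotas() above);
 * without quota_ino, journalled quota files can instead be configured via
 * usrjquota=/grpjquota=/prjjquota= together with jqfmt=.
 */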
#else
int f2fs_dquot_initialize(struct inode *inode)
{
	return 0;
}

int f2fs_quota_sync(struct super_block *sb, int type)
{
	return 0;
}

void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif
static const struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.free_inode	= f2fs_free_inode,
	.drop_inode	= f2fs_drop_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
#endif
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
	.shutdown	= f2fs_shutdown,
};
#ifdef CONFIG_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * Encrypting the root directory is not allowed because fsck
	 * expects lost+found directory to exist and remain unencrypted
	 * if LOST_FOUND feature is enabled.
	 */
	if (f2fs_sb_has_lost_found(sbi) &&
			inode->i_ino == F2FS_ROOT_INO(sbi))
		return -EPERM;

	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
{
	return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
}

static bool f2fs_has_stable_inodes(struct super_block *sb)
{
	return true;
}

static struct block_device **f2fs_get_devices(struct super_block *sb,
					      unsigned int *num_devs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct block_device **devs;
	int i;

	if (!f2fs_is_multi_device(sbi))
		return NULL;

	devs = kmalloc_array(sbi->s_ndevs, sizeof(*devs), GFP_KERNEL);
	if (!devs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < sbi->s_ndevs; i++)
		devs[i] = FDEV(i).bdev;
	*num_devs = sbi->s_ndevs;
	return devs;
}

static const struct fscrypt_operations f2fs_cryptops = {
	.needs_bounce_pages	= 1,
	.has_32bit_inodes	= 1,
	.supports_subblock_data_units = 1,
	.legacy_key_prefix	= "f2fs:",
	.get_context		= f2fs_get_context,
	.set_context		= f2fs_set_context,
	.get_dummy_policy	= f2fs_get_dummy_policy,
	.empty_dir		= f2fs_empty_dir,
	.has_stable_inodes	= f2fs_has_stable_inodes,
	.get_devices		= f2fs_get_devices,
};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (f2fs_check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.encode_fh = generic_encode_ino32_fh,
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
loff_t max_file_blocks(struct inode *inode)
{
	loff_t result = 0;
	loff_t leaf_count;

	/*
	 * note: previously, result is equal to (DEF_ADDRS_PER_INODE -
	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs try to reserve more
	 * space in inode.i_addr, it will be more safe to reassign
	 * result as zero.
	 */

	if (inode && f2fs_compressed_file(inode))
		leaf_count = ADDRS_PER_BLOCK(inode);
	else
		leaf_count = DEF_ADDRS_PER_BLOCK;

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	/*
	 * For compatibility with FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{64,32} with
	 * a 4K crypto data unit, we must restrict the max filesize to what can
	 * fit within U32_MAX + 1 data units.
	 */
	result = min(result, F2FS_BYTES_TO_BLK(((loff_t)U32_MAX + 1) * 4096));

	return result;
}
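/*
 * Worked example (illustrative, assuming the default 4 KiB block layout where
 * DEF_ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018):
 *
 *	direct nodes:	 2 * 1018		=         2,036 blocks
 *	indirect nodes:	 2 * 1018 * 1018	=     2,072,648 blocks
 *	double indirect: 1018 * 1018 * 1018	= 1,054,977,832 blocks
 *	total					~ 1,057,052,516 blocks (~4 TB)
 *
 * which is already below the 2^32-data-unit fscrypt clamp applied at the end,
 * so that clamp only matters for configurations with larger addressing.
 */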
static int __f2fs_commit_super(struct f2fs_sb_info *sbi, struct folio *folio,
				pgoff_t index, bool update)
{
	struct bio *bio;
	/* it's rare case, we can do fua all the time */
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA;
	int ret;

	folio_lock(folio);
	folio_wait_writeback(folio);
	if (update)
		memcpy(F2FS_SUPER_BLOCK(folio, index), F2FS_RAW_SUPER(sbi),
					sizeof(struct f2fs_super_block));
	folio_mark_dirty(folio);
	folio_clear_dirty_for_io(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);

	bio = bio_alloc(sbi->sb->s_bdev, 1, opf, GFP_NOFS);

	/* it doesn't need to set crypto context for superblock update */
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(folio_index(folio));

	if (!bio_add_folio(bio, folio, folio_size(folio), 0))
		f2fs_bug_on(sbi, 1);

	ret = submit_bio_wait(bio);
	folio_end_writeback(folio);

	return ret;
}
3362 static inline bool sanity_check_area_boundary(struct f2fs_sb_info
*sbi
,
3363 struct folio
*folio
, pgoff_t index
)
3365 struct f2fs_super_block
*raw_super
= F2FS_SUPER_BLOCK(folio
, index
);
3366 struct super_block
*sb
= sbi
->sb
;
3367 u32 segment0_blkaddr
= le32_to_cpu(raw_super
->segment0_blkaddr
);
3368 u32 cp_blkaddr
= le32_to_cpu(raw_super
->cp_blkaddr
);
3369 u32 sit_blkaddr
= le32_to_cpu(raw_super
->sit_blkaddr
);
3370 u32 nat_blkaddr
= le32_to_cpu(raw_super
->nat_blkaddr
);
3371 u32 ssa_blkaddr
= le32_to_cpu(raw_super
->ssa_blkaddr
);
3372 u32 main_blkaddr
= le32_to_cpu(raw_super
->main_blkaddr
);
3373 u32 segment_count_ckpt
= le32_to_cpu(raw_super
->segment_count_ckpt
);
3374 u32 segment_count_sit
= le32_to_cpu(raw_super
->segment_count_sit
);
3375 u32 segment_count_nat
= le32_to_cpu(raw_super
->segment_count_nat
);
3376 u32 segment_count_ssa
= le32_to_cpu(raw_super
->segment_count_ssa
);
3377 u32 segment_count_main
= le32_to_cpu(raw_super
->segment_count_main
);
3378 u32 segment_count
= le32_to_cpu(raw_super
->segment_count
);
3379 u32 log_blocks_per_seg
= le32_to_cpu(raw_super
->log_blocks_per_seg
);
3380 u64 main_end_blkaddr
= main_blkaddr
+
3381 ((u64
)segment_count_main
<< log_blocks_per_seg
);
3382 u64 seg_end_blkaddr
= segment0_blkaddr
+
3383 ((u64
)segment_count
<< log_blocks_per_seg
);
3385 if (segment0_blkaddr
!= cp_blkaddr
) {
3386 f2fs_info(sbi
, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
3387 segment0_blkaddr
, cp_blkaddr
);
3391 if (cp_blkaddr
+ (segment_count_ckpt
<< log_blocks_per_seg
) !=
3393 f2fs_info(sbi
, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
3394 cp_blkaddr
, sit_blkaddr
,
3395 segment_count_ckpt
<< log_blocks_per_seg
);
3399 if (sit_blkaddr
+ (segment_count_sit
<< log_blocks_per_seg
) !=
3401 f2fs_info(sbi
, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
3402 sit_blkaddr
, nat_blkaddr
,
3403 segment_count_sit
<< log_blocks_per_seg
);
3407 if (nat_blkaddr
+ (segment_count_nat
<< log_blocks_per_seg
) !=
3409 f2fs_info(sbi
, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
3410 nat_blkaddr
, ssa_blkaddr
,
3411 segment_count_nat
<< log_blocks_per_seg
);
3415 if (ssa_blkaddr
+ (segment_count_ssa
<< log_blocks_per_seg
) !=
3417 f2fs_info(sbi
, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
3418 ssa_blkaddr
, main_blkaddr
,
3419 segment_count_ssa
<< log_blocks_per_seg
);
3423 if (main_end_blkaddr
> seg_end_blkaddr
) {
3424 f2fs_info(sbi
, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
3425 main_blkaddr
, seg_end_blkaddr
,
3426 segment_count_main
<< log_blocks_per_seg
);
3428 } else if (main_end_blkaddr
< seg_end_blkaddr
) {
3432 /* fix in-memory information all the time */
3433 raw_super
->segment_count
= cpu_to_le32((main_end_blkaddr
-
3434 segment0_blkaddr
) >> log_blocks_per_seg
);
3436 if (f2fs_readonly(sb
) || f2fs_hw_is_readonly(sbi
)) {
3437 set_sbi_flag(sbi
, SBI_NEED_SB_WRITE
);
3440 err
= __f2fs_commit_super(sbi
, folio
, index
, false);
3441 res
= err
? "failed" : "done";
3443 f2fs_info(sbi
, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
3444 res
, main_blkaddr
, seg_end_blkaddr
,
3445 segment_count_main
<< log_blocks_per_seg
);
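/*
 * For reference, the on-disk ordering these boundary checks enforce is
 * (illustrative sketch, block addresses growing left to right):
 *
 *	segment0 == CP | SIT | NAT | SSA | MAIN ... end of segment_count
 *
 * i.e. each metadata area must start exactly where the previous one ends,
 * and the MAIN area may end at or before the last formatted segment; if it
 * ends earlier, segment_count is trimmed in memory (and rewritten on disk
 * when the superblock is writable).
 */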
3452 static int sanity_check_raw_super(struct f2fs_sb_info
*sbi
,
3453 struct folio
*folio
, pgoff_t index
)
3455 block_t segment_count
, segs_per_sec
, secs_per_zone
, segment_count_main
;
3456 block_t total_sections
, blocks_per_seg
;
3457 struct f2fs_super_block
*raw_super
= F2FS_SUPER_BLOCK(folio
, index
);
3458 size_t crc_offset
= 0;
3461 if (le32_to_cpu(raw_super
->magic
) != F2FS_SUPER_MAGIC
) {
3462 f2fs_info(sbi
, "Magic Mismatch, valid(0x%x) - read(0x%x)",
3463 F2FS_SUPER_MAGIC
, le32_to_cpu(raw_super
->magic
));
3467 /* Check checksum_offset and crc in superblock */
3468 if (__F2FS_HAS_FEATURE(raw_super
, F2FS_FEATURE_SB_CHKSUM
)) {
3469 crc_offset
= le32_to_cpu(raw_super
->checksum_offset
);
3471 offsetof(struct f2fs_super_block
, crc
)) {
3472 f2fs_info(sbi
, "Invalid SB checksum offset: %zu",
3474 return -EFSCORRUPTED
;
3476 crc
= le32_to_cpu(raw_super
->crc
);
3477 if (!f2fs_crc_valid(sbi
, crc
, raw_super
, crc_offset
)) {
3478 f2fs_info(sbi
, "Invalid SB checksum value: %u", crc
);
3479 return -EFSCORRUPTED
;
3483 /* only support block_size equals to PAGE_SIZE */
3484 if (le32_to_cpu(raw_super
->log_blocksize
) != F2FS_BLKSIZE_BITS
) {
3485 f2fs_info(sbi
, "Invalid log_blocksize (%u), supports only %u",
3486 le32_to_cpu(raw_super
->log_blocksize
),
3488 return -EFSCORRUPTED
;
3491 /* check log blocks per segment */
3492 if (le32_to_cpu(raw_super
->log_blocks_per_seg
) != 9) {
3493 f2fs_info(sbi
, "Invalid log blocks per segment (%u)",
3494 le32_to_cpu(raw_super
->log_blocks_per_seg
));
3495 return -EFSCORRUPTED
;
3498 /* Currently, support 512/1024/2048/4096/16K bytes sector size */
3499 if (le32_to_cpu(raw_super
->log_sectorsize
) >
3500 F2FS_MAX_LOG_SECTOR_SIZE
||
3501 le32_to_cpu(raw_super
->log_sectorsize
) <
3502 F2FS_MIN_LOG_SECTOR_SIZE
) {
3503 f2fs_info(sbi
, "Invalid log sectorsize (%u)",
3504 le32_to_cpu(raw_super
->log_sectorsize
));
3505 return -EFSCORRUPTED
;
3507 if (le32_to_cpu(raw_super
->log_sectors_per_block
) +
3508 le32_to_cpu(raw_super
->log_sectorsize
) !=
3509 F2FS_MAX_LOG_SECTOR_SIZE
) {
3510 f2fs_info(sbi
, "Invalid log sectors per block(%u) log sectorsize(%u)",
3511 le32_to_cpu(raw_super
->log_sectors_per_block
),
3512 le32_to_cpu(raw_super
->log_sectorsize
));
3513 return -EFSCORRUPTED
;
3516 segment_count
= le32_to_cpu(raw_super
->segment_count
);
3517 segment_count_main
= le32_to_cpu(raw_super
->segment_count_main
);
3518 segs_per_sec
= le32_to_cpu(raw_super
->segs_per_sec
);
3519 secs_per_zone
= le32_to_cpu(raw_super
->secs_per_zone
);
3520 total_sections
= le32_to_cpu(raw_super
->section_count
);
3522 /* blocks_per_seg should be 512, given the above check */
3523 blocks_per_seg
= BIT(le32_to_cpu(raw_super
->log_blocks_per_seg
));
3525 if (segment_count
> F2FS_MAX_SEGMENT
||
3526 segment_count
< F2FS_MIN_SEGMENTS
) {
3527 f2fs_info(sbi
, "Invalid segment count (%u)", segment_count
);
3528 return -EFSCORRUPTED
;
3531 if (total_sections
> segment_count_main
|| total_sections
< 1 ||
3532 segs_per_sec
> segment_count
|| !segs_per_sec
) {
3533 f2fs_info(sbi
, "Invalid segment/section count (%u, %u x %u)",
3534 segment_count
, total_sections
, segs_per_sec
);
3535 return -EFSCORRUPTED
;
3538 if (segment_count_main
!= total_sections
* segs_per_sec
) {
3539 f2fs_info(sbi
, "Invalid segment/section count (%u != %u * %u)",
3540 segment_count_main
, total_sections
, segs_per_sec
);
3541 return -EFSCORRUPTED
;
3544 if ((segment_count
/ segs_per_sec
) < total_sections
) {
3545 f2fs_info(sbi
, "Small segment_count (%u < %u * %u)",
3546 segment_count
, segs_per_sec
, total_sections
);
3547 return -EFSCORRUPTED
;
3550 if (segment_count
> (le64_to_cpu(raw_super
->block_count
) >> 9)) {
3551 f2fs_info(sbi
, "Wrong segment_count / block_count (%u > %llu)",
3552 segment_count
, le64_to_cpu(raw_super
->block_count
));
3553 return -EFSCORRUPTED
;
3556 if (RDEV(0).path
[0]) {
3557 block_t dev_seg_count
= le32_to_cpu(RDEV(0).total_segments
);
3560 while (i
< MAX_DEVICES
&& RDEV(i
).path
[0]) {
3561 dev_seg_count
+= le32_to_cpu(RDEV(i
).total_segments
);
3564 if (segment_count
!= dev_seg_count
) {
3565 f2fs_info(sbi
, "Segment count (%u) mismatch with total segments from devices (%u)",
3566 segment_count
, dev_seg_count
);
3567 return -EFSCORRUPTED
;
3570 if (__F2FS_HAS_FEATURE(raw_super
, F2FS_FEATURE_BLKZONED
) &&
3571 !bdev_is_zoned(sbi
->sb
->s_bdev
)) {
3572 f2fs_info(sbi
, "Zoned block device path is missing");
3573 return -EFSCORRUPTED
;
3577 if (secs_per_zone
> total_sections
|| !secs_per_zone
) {
3578 f2fs_info(sbi
, "Wrong secs_per_zone / total_sections (%u, %u)",
3579 secs_per_zone
, total_sections
);
3580 return -EFSCORRUPTED
;
3582 if (le32_to_cpu(raw_super
->extension_count
) > F2FS_MAX_EXTENSION
||
3583 raw_super
->hot_ext_count
> F2FS_MAX_EXTENSION
||
3584 (le32_to_cpu(raw_super
->extension_count
) +
3585 raw_super
->hot_ext_count
) > F2FS_MAX_EXTENSION
) {
3586 f2fs_info(sbi
, "Corrupted extension count (%u + %u > %u)",
3587 le32_to_cpu(raw_super
->extension_count
),
3588 raw_super
->hot_ext_count
,
3589 F2FS_MAX_EXTENSION
);
3590 return -EFSCORRUPTED
;
3593 if (le32_to_cpu(raw_super
->cp_payload
) >=
3594 (blocks_per_seg
- F2FS_CP_PACKS
-
3595 NR_CURSEG_PERSIST_TYPE
)) {
3596 f2fs_info(sbi
, "Insane cp_payload (%u >= %u)",
3597 le32_to_cpu(raw_super
->cp_payload
),
3598 blocks_per_seg
- F2FS_CP_PACKS
-
3599 NR_CURSEG_PERSIST_TYPE
);
3600 return -EFSCORRUPTED
;
3603 /* check reserved ino info */
3604 if (le32_to_cpu(raw_super
->node_ino
) != 1 ||
3605 le32_to_cpu(raw_super
->meta_ino
) != 2 ||
3606 le32_to_cpu(raw_super
->root_ino
) != 3) {
3607 f2fs_info(sbi
, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
3608 le32_to_cpu(raw_super
->node_ino
),
3609 le32_to_cpu(raw_super
->meta_ino
),
3610 le32_to_cpu(raw_super
->root_ino
));
3611 return -EFSCORRUPTED
;
3614 /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
3615 if (sanity_check_area_boundary(sbi
, folio
, index
))
3616 return -EFSCORRUPTED
;
3621 int f2fs_sanity_check_ckpt(struct f2fs_sb_info
*sbi
)
3623 unsigned int total
, fsmeta
;
3624 struct f2fs_super_block
*raw_super
= F2FS_RAW_SUPER(sbi
);
3625 struct f2fs_checkpoint
*ckpt
= F2FS_CKPT(sbi
);
3626 unsigned int ovp_segments
, reserved_segments
;
3627 unsigned int main_segs
, blocks_per_seg
;
3628 unsigned int sit_segs
, nat_segs
;
3629 unsigned int sit_bitmap_size
, nat_bitmap_size
;
3630 unsigned int log_blocks_per_seg
;
3631 unsigned int segment_count_main
;
3632 unsigned int cp_pack_start_sum
, cp_payload
;
3633 block_t user_block_count
, valid_user_blocks
;
3634 block_t avail_node_count
, valid_node_count
;
3635 unsigned int nat_blocks
, nat_bits_bytes
, nat_bits_blocks
;
3638 total
= le32_to_cpu(raw_super
->segment_count
);
3639 fsmeta
= le32_to_cpu(raw_super
->segment_count_ckpt
);
3640 sit_segs
= le32_to_cpu(raw_super
->segment_count_sit
);
3642 nat_segs
= le32_to_cpu(raw_super
->segment_count_nat
);
3644 fsmeta
+= le32_to_cpu(ckpt
->rsvd_segment_count
);
3645 fsmeta
+= le32_to_cpu(raw_super
->segment_count_ssa
);
3647 if (unlikely(fsmeta
>= total
))
3650 ovp_segments
= le32_to_cpu(ckpt
->overprov_segment_count
);
3651 reserved_segments
= le32_to_cpu(ckpt
->rsvd_segment_count
);
3653 if (!f2fs_sb_has_readonly(sbi
) &&
3654 unlikely(fsmeta
< F2FS_MIN_META_SEGMENTS
||
3655 ovp_segments
== 0 || reserved_segments
== 0)) {
3656 f2fs_err(sbi
, "Wrong layout: check mkfs.f2fs version");
3659 user_block_count
= le64_to_cpu(ckpt
->user_block_count
);
3660 segment_count_main
= le32_to_cpu(raw_super
->segment_count_main
) +
3661 (f2fs_sb_has_readonly(sbi
) ? 1 : 0);
3662 log_blocks_per_seg
= le32_to_cpu(raw_super
->log_blocks_per_seg
);
3663 if (!user_block_count
|| user_block_count
>=
3664 segment_count_main
<< log_blocks_per_seg
) {
3665 f2fs_err(sbi
, "Wrong user_block_count: %u",
3670 valid_user_blocks
= le64_to_cpu(ckpt
->valid_block_count
);
3671 if (valid_user_blocks
> user_block_count
) {
3672 f2fs_err(sbi
, "Wrong valid_user_blocks: %u, user_block_count: %u",
3673 valid_user_blocks
, user_block_count
);
3677 valid_node_count
= le32_to_cpu(ckpt
->valid_node_count
);
3678 avail_node_count
= sbi
->total_node_count
- F2FS_RESERVED_NODE_NUM
;
3679 if (valid_node_count
> avail_node_count
) {
3680 f2fs_err(sbi
, "Wrong valid_node_count: %u, avail_node_count: %u",
3681 valid_node_count
, avail_node_count
);
3685 main_segs
= le32_to_cpu(raw_super
->segment_count_main
);
3686 blocks_per_seg
= BLKS_PER_SEG(sbi
);
3688 for (i
= 0; i
< NR_CURSEG_NODE_TYPE
; i
++) {
3689 if (le32_to_cpu(ckpt
->cur_node_segno
[i
]) >= main_segs
||
3690 le16_to_cpu(ckpt
->cur_node_blkoff
[i
]) >= blocks_per_seg
)
3693 if (f2fs_sb_has_readonly(sbi
))
3696 for (j
= i
+ 1; j
< NR_CURSEG_NODE_TYPE
; j
++) {
3697 if (le32_to_cpu(ckpt
->cur_node_segno
[i
]) ==
3698 le32_to_cpu(ckpt
->cur_node_segno
[j
])) {
3699 f2fs_err(sbi
, "Node segment (%u, %u) has the same segno: %u",
3701 le32_to_cpu(ckpt
->cur_node_segno
[i
]));
3707 for (i
= 0; i
< NR_CURSEG_DATA_TYPE
; i
++) {
3708 if (le32_to_cpu(ckpt
->cur_data_segno
[i
]) >= main_segs
||
3709 le16_to_cpu(ckpt
->cur_data_blkoff
[i
]) >= blocks_per_seg
)
3712 if (f2fs_sb_has_readonly(sbi
))
3715 for (j
= i
+ 1; j
< NR_CURSEG_DATA_TYPE
; j
++) {
3716 if (le32_to_cpu(ckpt
->cur_data_segno
[i
]) ==
3717 le32_to_cpu(ckpt
->cur_data_segno
[j
])) {
3718 f2fs_err(sbi
, "Data segment (%u, %u) has the same segno: %u",
3720 le32_to_cpu(ckpt
->cur_data_segno
[i
]));
3725 for (i
= 0; i
< NR_CURSEG_NODE_TYPE
; i
++) {
3726 for (j
= 0; j
< NR_CURSEG_DATA_TYPE
; j
++) {
3727 if (le32_to_cpu(ckpt
->cur_node_segno
[i
]) ==
3728 le32_to_cpu(ckpt
->cur_data_segno
[j
])) {
3729 f2fs_err(sbi
, "Node segment (%u) and Data segment (%u) has the same segno: %u",
3731 le32_to_cpu(ckpt
->cur_node_segno
[i
]));
3737 sit_bitmap_size
= le32_to_cpu(ckpt
->sit_ver_bitmap_bytesize
);
3738 nat_bitmap_size
= le32_to_cpu(ckpt
->nat_ver_bitmap_bytesize
);
3740 if (sit_bitmap_size
!= ((sit_segs
/ 2) << log_blocks_per_seg
) / 8 ||
3741 nat_bitmap_size
!= ((nat_segs
/ 2) << log_blocks_per_seg
) / 8) {
3742 f2fs_err(sbi
, "Wrong bitmap size: sit: %u, nat:%u",
3743 sit_bitmap_size
, nat_bitmap_size
);
3747 cp_pack_start_sum
= __start_sum_addr(sbi
);
3748 cp_payload
= __cp_payload(sbi
);
3749 if (cp_pack_start_sum
< cp_payload
+ 1 ||
3750 cp_pack_start_sum
> blocks_per_seg
- 1 -
3751 NR_CURSEG_PERSIST_TYPE
) {
3752 f2fs_err(sbi
, "Wrong cp_pack_start_sum: %u",
3757 if (__is_set_ckpt_flags(ckpt
, CP_LARGE_NAT_BITMAP_FLAG
) &&
3758 le32_to_cpu(ckpt
->checksum_offset
) != CP_MIN_CHKSUM_OFFSET
) {
3759 f2fs_warn(sbi
, "using deprecated layout of large_nat_bitmap, "
3760 "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
3761 "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
3762 le32_to_cpu(ckpt
->checksum_offset
));
3766 nat_blocks
= nat_segs
<< log_blocks_per_seg
;
3767 nat_bits_bytes
= nat_blocks
/ BITS_PER_BYTE
;
3768 nat_bits_blocks
= F2FS_BLK_ALIGN((nat_bits_bytes
<< 1) + 8);
3769 if (__is_set_ckpt_flags(ckpt
, CP_NAT_BITS_FLAG
) &&
3770 (cp_payload
+ F2FS_CP_PACKS
+
3771 NR_CURSEG_PERSIST_TYPE
+ nat_bits_blocks
>= blocks_per_seg
)) {
3772 f2fs_warn(sbi
, "Insane cp_payload: %u, nat_bits_blocks: %u)",
3773 cp_payload
, nat_bits_blocks
);
3777 if (unlikely(f2fs_cp_error(sbi
))) {
3778 f2fs_err(sbi
, "A bug case: need to run fsck");
3784 static void init_sb_info(struct f2fs_sb_info
*sbi
)
3786 struct f2fs_super_block
*raw_super
= sbi
->raw_super
;
3789 sbi
->log_sectors_per_block
=
3790 le32_to_cpu(raw_super
->log_sectors_per_block
);
3791 sbi
->log_blocksize
= le32_to_cpu(raw_super
->log_blocksize
);
3792 sbi
->blocksize
= BIT(sbi
->log_blocksize
);
3793 sbi
->log_blocks_per_seg
= le32_to_cpu(raw_super
->log_blocks_per_seg
);
3794 sbi
->blocks_per_seg
= BIT(sbi
->log_blocks_per_seg
);
3795 sbi
->segs_per_sec
= le32_to_cpu(raw_super
->segs_per_sec
);
3796 sbi
->secs_per_zone
= le32_to_cpu(raw_super
->secs_per_zone
);
3797 sbi
->total_sections
= le32_to_cpu(raw_super
->section_count
);
3798 sbi
->total_node_count
= SEGS_TO_BLKS(sbi
,
3799 ((le32_to_cpu(raw_super
->segment_count_nat
) / 2) *
3800 NAT_ENTRY_PER_BLOCK
));
3801 F2FS_ROOT_INO(sbi
) = le32_to_cpu(raw_super
->root_ino
);
3802 F2FS_NODE_INO(sbi
) = le32_to_cpu(raw_super
->node_ino
);
3803 F2FS_META_INO(sbi
) = le32_to_cpu(raw_super
->meta_ino
);
3804 sbi
->cur_victim_sec
= NULL_SECNO
;
3805 sbi
->gc_mode
= GC_NORMAL
;
3806 sbi
->next_victim_seg
[BG_GC
] = NULL_SEGNO
;
3807 sbi
->next_victim_seg
[FG_GC
] = NULL_SEGNO
;
3808 sbi
->max_victim_search
= DEF_MAX_VICTIM_SEARCH
;
3809 sbi
->migration_granularity
= SEGS_PER_SEC(sbi
);
3810 sbi
->migration_window_granularity
= f2fs_sb_has_blkzoned(sbi
) ?
3811 DEF_MIGRATION_WINDOW_GRANULARITY_ZONED
: SEGS_PER_SEC(sbi
);
3812 sbi
->seq_file_ra_mul
= MIN_RA_MUL
;
3813 sbi
->max_fragment_chunk
= DEF_FRAGMENT_SIZE
;
3814 sbi
->max_fragment_hole
= DEF_FRAGMENT_SIZE
;
3815 spin_lock_init(&sbi
->gc_remaining_trials_lock
);
3816 atomic64_set(&sbi
->current_atomic_write
, 0);
3818 sbi
->dir_level
= DEF_DIR_LEVEL
;
3819 sbi
->interval_time
[CP_TIME
] = DEF_CP_INTERVAL
;
3820 sbi
->interval_time
[REQ_TIME
] = DEF_IDLE_INTERVAL
;
3821 sbi
->interval_time
[DISCARD_TIME
] = DEF_IDLE_INTERVAL
;
3822 sbi
->interval_time
[GC_TIME
] = DEF_IDLE_INTERVAL
;
3823 sbi
->interval_time
[DISABLE_TIME
] = DEF_DISABLE_INTERVAL
;
3824 sbi
->interval_time
[UMOUNT_DISCARD_TIMEOUT
] =
3825 DEF_UMOUNT_DISCARD_TIMEOUT
;
3826 clear_sbi_flag(sbi
, SBI_NEED_FSCK
);
3828 for (i
= 0; i
< NR_COUNT_TYPE
; i
++)
3829 atomic_set(&sbi
->nr_pages
[i
], 0);
3831 for (i
= 0; i
< META
; i
++)
3832 atomic_set(&sbi
->wb_sync_req
[i
], 0);
3834 INIT_LIST_HEAD(&sbi
->s_list
);
3835 mutex_init(&sbi
->umount_mutex
);
3836 init_f2fs_rwsem(&sbi
->io_order_lock
);
3837 spin_lock_init(&sbi
->cp_lock
);
3839 sbi
->dirty_device
= 0;
3840 spin_lock_init(&sbi
->dev_lock
);
3842 init_f2fs_rwsem(&sbi
->sb_lock
);
3843 init_f2fs_rwsem(&sbi
->pin_sem
);
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	err = percpu_counter_init(&sbi->rf_node_block_count, 0, GFP_KERNEL);
	if (err)
		goto err_valid_block;

	err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
	if (err)
		goto err_node_block;
	return 0;

err_node_block:
	percpu_counter_destroy(&sbi->rf_node_block_count);
err_valid_block:
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	return err;
}
3871 #ifdef CONFIG_BLK_DEV_ZONED
3873 struct f2fs_report_zones_args
{
3874 struct f2fs_sb_info
*sbi
;
3875 struct f2fs_dev_info
*dev
;
3878 static int f2fs_report_zone_cb(struct blk_zone
*zone
, unsigned int idx
,
3881 struct f2fs_report_zones_args
*rz_args
= data
;
3882 block_t unusable_blocks
= (zone
->len
- zone
->capacity
) >>
3883 F2FS_LOG_SECTORS_PER_BLOCK
;
3885 if (zone
->type
== BLK_ZONE_TYPE_CONVENTIONAL
)
3888 set_bit(idx
, rz_args
->dev
->blkz_seq
);
3889 if (!rz_args
->sbi
->unusable_blocks_per_sec
) {
3890 rz_args
->sbi
->unusable_blocks_per_sec
= unusable_blocks
;
3893 if (rz_args
->sbi
->unusable_blocks_per_sec
!= unusable_blocks
) {
3894 f2fs_err(rz_args
->sbi
, "F2FS supports single zone capacity\n");
3900 static int init_blkz_info(struct f2fs_sb_info
*sbi
, int devi
)
3902 struct block_device
*bdev
= FDEV(devi
).bdev
;
3903 sector_t nr_sectors
= bdev_nr_sectors(bdev
);
3904 struct f2fs_report_zones_args rep_zone_arg
;
3906 unsigned int max_open_zones
;
3909 if (!f2fs_sb_has_blkzoned(sbi
))
3912 if (bdev_is_zoned(FDEV(devi
).bdev
)) {
3913 max_open_zones
= bdev_max_open_zones(bdev
);
3914 if (max_open_zones
&& (max_open_zones
< sbi
->max_open_zones
))
3915 sbi
->max_open_zones
= max_open_zones
;
3916 if (sbi
->max_open_zones
< F2FS_OPTION(sbi
).active_logs
) {
3918 "zoned: max open zones %u is too small, need at least %u open zones",
3919 sbi
->max_open_zones
, F2FS_OPTION(sbi
).active_logs
);
3924 zone_sectors
= bdev_zone_sectors(bdev
);
3925 if (sbi
->blocks_per_blkz
&& sbi
->blocks_per_blkz
!=
3926 SECTOR_TO_BLOCK(zone_sectors
))
3928 sbi
->blocks_per_blkz
= SECTOR_TO_BLOCK(zone_sectors
);
3929 FDEV(devi
).nr_blkz
= div_u64(SECTOR_TO_BLOCK(nr_sectors
),
3930 sbi
->blocks_per_blkz
);
3931 if (nr_sectors
& (zone_sectors
- 1))
3932 FDEV(devi
).nr_blkz
++;
3934 FDEV(devi
).blkz_seq
= f2fs_kvzalloc(sbi
,
3935 BITS_TO_LONGS(FDEV(devi
).nr_blkz
)
3936 * sizeof(unsigned long),
3938 if (!FDEV(devi
).blkz_seq
)
3941 rep_zone_arg
.sbi
= sbi
;
3942 rep_zone_arg
.dev
= &FDEV(devi
);
3944 ret
= blkdev_report_zones(bdev
, 0, BLK_ALL_ZONES
, f2fs_report_zone_cb
,
3953 * Read f2fs raw super block.
3954 * Because we have two copies of super block, so read both of them
3955 * to get the first valid one. If any one of them is broken, we pass
3956 * them recovery flag back to the caller.
3958 static int read_raw_super_block(struct f2fs_sb_info
*sbi
,
3959 struct f2fs_super_block
**raw_super
,
3960 int *valid_super_block
, int *recovery
)
3962 struct super_block
*sb
= sbi
->sb
;
3964 struct folio
*folio
;
3965 struct f2fs_super_block
*super
;
3968 super
= kzalloc(sizeof(struct f2fs_super_block
), GFP_KERNEL
);
3972 for (block
= 0; block
< 2; block
++) {
3973 folio
= read_mapping_folio(sb
->s_bdev
->bd_mapping
, block
, NULL
);
3974 if (IS_ERR(folio
)) {
3975 f2fs_err(sbi
, "Unable to read %dth superblock",
3977 err
= PTR_ERR(folio
);
3982 /* sanity checking of raw super */
3983 err
= sanity_check_raw_super(sbi
, folio
, block
);
3985 f2fs_err(sbi
, "Can't find valid F2FS filesystem in %dth superblock",
3993 memcpy(super
, F2FS_SUPER_BLOCK(folio
, block
),
3995 *valid_super_block
= block
;
4001 /* No valid superblock */
4010 int f2fs_commit_super(struct f2fs_sb_info
*sbi
, bool recover
)
4012 struct folio
*folio
;
4017 if ((recover
&& f2fs_readonly(sbi
->sb
)) ||
4018 f2fs_hw_is_readonly(sbi
)) {
4019 set_sbi_flag(sbi
, SBI_NEED_SB_WRITE
);
4023 /* we should update superblock crc here */
4024 if (!recover
&& f2fs_sb_has_sb_chksum(sbi
)) {
4025 crc
= f2fs_crc32(sbi
, F2FS_RAW_SUPER(sbi
),
4026 offsetof(struct f2fs_super_block
, crc
));
4027 F2FS_RAW_SUPER(sbi
)->crc
= cpu_to_le32(crc
);
4030 /* write back-up superblock first */
4031 index
= sbi
->valid_super_block
? 0 : 1;
4032 folio
= read_mapping_folio(sbi
->sb
->s_bdev
->bd_mapping
, index
, NULL
);
4034 return PTR_ERR(folio
);
4035 err
= __f2fs_commit_super(sbi
, folio
, index
, true);
4038 /* if we are in recovery path, skip writing valid superblock */
4042 /* write current valid superblock */
4043 index
= sbi
->valid_super_block
;
4044 folio
= read_mapping_folio(sbi
->sb
->s_bdev
->bd_mapping
, index
, NULL
);
4046 return PTR_ERR(folio
);
4047 err
= __f2fs_commit_super(sbi
, folio
, index
, true);
static void save_stop_reason(struct f2fs_sb_info *sbi, unsigned char reason)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->error_lock, flags);
	if (sbi->stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0))
		sbi->stop_reason[reason]++;
	spin_unlock_irqrestore(&sbi->error_lock, flags);
}
static void f2fs_record_stop_reason(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned long flags;
	int err;

	f2fs_down_write(&sbi->sb_lock);

	spin_lock_irqsave(&sbi->error_lock, flags);
	if (sbi->error_dirty) {
		memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
							MAX_F2FS_ERRORS);
		sbi->error_dirty = false;
	}
	memcpy(raw_super->s_stop_reason, sbi->stop_reason, MAX_STOP_REASON);
	spin_unlock_irqrestore(&sbi->error_lock, flags);

	err = f2fs_commit_super(sbi, false);

	f2fs_up_write(&sbi->sb_lock);
	if (err)
		f2fs_err_ratelimited(sbi,
			"f2fs_commit_super fails to record stop_reason, err:%d",
			err);
}
void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->error_lock, flags);
	if (!test_bit(flag, (unsigned long *)sbi->errors)) {
		set_bit(flag, (unsigned long *)sbi->errors);
		sbi->error_dirty = true;
	}
	spin_unlock_irqrestore(&sbi->error_lock, flags);
}
static bool f2fs_update_errors(struct f2fs_sb_info *sbi)
{
	unsigned long flags;
	bool need_update = false;

	spin_lock_irqsave(&sbi->error_lock, flags);
	if (sbi->error_dirty) {
		memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
							MAX_F2FS_ERRORS);
		sbi->error_dirty = false;
		need_update = true;
	}
	spin_unlock_irqrestore(&sbi->error_lock, flags);

	return need_update;
}
static void f2fs_record_errors(struct f2fs_sb_info *sbi, unsigned char error)
{
	int err;

	f2fs_down_write(&sbi->sb_lock);

	if (!f2fs_update_errors(sbi))
		goto out_unlock;

	err = f2fs_commit_super(sbi, false);
	if (err)
		f2fs_err_ratelimited(sbi,
			"f2fs_commit_super fails to record errors:%u, err:%d",
			error, err);
out_unlock:
	f2fs_up_write(&sbi->sb_lock);
}

void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error)
{
	f2fs_save_errors(sbi, error);
	f2fs_record_errors(sbi, error);
}
void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error)
{
	f2fs_save_errors(sbi, error);

	if (!sbi->error_dirty)
		return;
	if (!test_bit(error, (unsigned long *)sbi->errors))
		return;
	schedule_work(&sbi->s_error_work);
}

static bool system_going_down(void)
{
	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
		|| system_state == SYSTEM_RESTART;
}
void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
							bool irq_context)
{
	struct super_block *sb = sbi->sb;
	bool shutdown = reason == STOP_CP_REASON_SHUTDOWN;
	bool continue_fs = !shutdown &&
			F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE;

	set_ckpt_flags(sbi, CP_ERROR_FLAG);

	if (!f2fs_hw_is_readonly(sbi)) {
		save_stop_reason(sbi, reason);

		if (irq_context && !shutdown)
			schedule_work(&sbi->s_error_work);
		else
			f2fs_record_stop_reason(sbi);
	}

	/*
	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
	 * could panic during 'reboot -f' as the underlying device got already
	 * disabled.
	 */
	if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC &&
				!shutdown && !system_going_down() &&
				!is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN))
		panic("F2FS-fs (device %s): panic forced after error\n",
							sb->s_id);

	if (shutdown)
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);

	/*
	 * Continue filesystem operators if errors=continue. Should not set
	 * RO by shutdown, since RO bypasses thaw_super which can hang the
	 * f2fs.
	 */
	if (continue_fs || f2fs_readonly(sb) || shutdown) {
		f2fs_warn(sbi, "Stopped filesystem due to reason: %d", reason);
		return;
	}

	f2fs_warn(sbi, "Remounting filesystem read-only");

	/*
	 * We have already set CP_ERROR_FLAG flag to stop all updates
	 * to filesystem, so it doesn't need to set SB_RDONLY flag here
	 * because the flag should be set covered w/ sb->s_umount semaphore
	 * via remount procedure, otherwise, it will confuse code like
	 * freeze_super() which will lead to deadlocks and other problems.
	 */
}
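/*
 * Illustrative usage (not part of this file): the branches above correspond
 * to the errors= mount option, e.g.
 *
 *	mount -o errors=remount-ro /dev/sdX /mnt	(hypothetical device)
 *	mount -o errors=continue   /dev/sdX /mnt
 *	mount -o errors=panic      /dev/sdX /mnt
 *
 * remount-ro stops further updates once CP_ERROR_FLAG is set, continue keeps
 * the filesystem running after recording the error, and panic forces a crash
 * unless the system is already shutting down or rebooting.
 */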
static void f2fs_record_error_work(struct work_struct *work)
{
	struct f2fs_sb_info *sbi = container_of(work,
					struct f2fs_sb_info, s_error_work);

	f2fs_record_stop_reason(sbi);
}
4220 static int f2fs_scan_devices(struct f2fs_sb_info
*sbi
)
4222 struct f2fs_super_block
*raw_super
= F2FS_RAW_SUPER(sbi
);
4223 unsigned int max_devices
= MAX_DEVICES
;
4224 unsigned int logical_blksize
;
4225 blk_mode_t mode
= sb_open_mode(sbi
->sb
->s_flags
);
4228 /* Initialize single device information */
4229 if (!RDEV(0).path
[0]) {
4230 if (!bdev_is_zoned(sbi
->sb
->s_bdev
))
4236 * Initialize multiple devices information, or single
4237 * zoned block device information.
4239 sbi
->devs
= f2fs_kzalloc(sbi
,
4240 array_size(max_devices
,
4241 sizeof(struct f2fs_dev_info
)),
4246 logical_blksize
= bdev_logical_block_size(sbi
->sb
->s_bdev
);
4247 sbi
->aligned_blksize
= true;
4248 #ifdef CONFIG_BLK_DEV_ZONED
4249 sbi
->max_open_zones
= UINT_MAX
;
4250 sbi
->blkzone_alloc_policy
= BLKZONE_ALLOC_PRIOR_SEQ
;
4253 for (i
= 0; i
< max_devices
; i
++) {
4255 FDEV(0).bdev_file
= sbi
->sb
->s_bdev_file
;
4256 else if (!RDEV(i
).path
[0])
4259 if (max_devices
> 1) {
4260 /* Multi-device mount */
4261 memcpy(FDEV(i
).path
, RDEV(i
).path
, MAX_PATH_LEN
);
4262 FDEV(i
).total_segments
=
4263 le32_to_cpu(RDEV(i
).total_segments
);
4265 FDEV(i
).start_blk
= 0;
4266 FDEV(i
).end_blk
= FDEV(i
).start_blk
+
4268 FDEV(i
).total_segments
) - 1 +
4269 le32_to_cpu(raw_super
->segment0_blkaddr
);
4271 FDEV(i
).start_blk
= FDEV(i
- 1).end_blk
+ 1;
4272 FDEV(i
).end_blk
= FDEV(i
).start_blk
+
4274 FDEV(i
).total_segments
) - 1;
4275 FDEV(i
).bdev_file
= bdev_file_open_by_path(
4276 FDEV(i
).path
, mode
, sbi
->sb
, NULL
);
4279 if (IS_ERR(FDEV(i
).bdev_file
))
4280 return PTR_ERR(FDEV(i
).bdev_file
);
4282 FDEV(i
).bdev
= file_bdev(FDEV(i
).bdev_file
);
4283 /* to release errored devices */
4284 sbi
->s_ndevs
= i
+ 1;
4286 if (logical_blksize
!= bdev_logical_block_size(FDEV(i
).bdev
))
4287 sbi
->aligned_blksize
= false;
4289 #ifdef CONFIG_BLK_DEV_ZONED
4290 if (bdev_is_zoned(FDEV(i
).bdev
)) {
4291 if (!f2fs_sb_has_blkzoned(sbi
)) {
4292 f2fs_err(sbi
, "Zoned block device feature not enabled");
4295 if (init_blkz_info(sbi
, i
)) {
4296 f2fs_err(sbi
, "Failed to initialize F2FS blkzone information");
4299 if (max_devices
== 1)
4301 f2fs_info(sbi
, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: Host-managed)",
4303 FDEV(i
).total_segments
,
4304 FDEV(i
).start_blk
, FDEV(i
).end_blk
);
4308 f2fs_info(sbi
, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
4310 FDEV(i
).total_segments
,
4311 FDEV(i
).start_blk
, FDEV(i
).end_blk
);
4316 static int f2fs_setup_casefold(struct f2fs_sb_info
*sbi
)
4318 #if IS_ENABLED(CONFIG_UNICODE)
4319 if (f2fs_sb_has_casefold(sbi
) && !sbi
->sb
->s_encoding
) {
4320 const struct f2fs_sb_encodings
*encoding_info
;
4321 struct unicode_map
*encoding
;
4322 __u16 encoding_flags
;
4324 encoding_info
= f2fs_sb_read_encoding(sbi
->raw_super
);
4325 if (!encoding_info
) {
4327 "Encoding requested by superblock is unknown");
4331 encoding_flags
= le16_to_cpu(sbi
->raw_super
->s_encoding_flags
);
4332 encoding
= utf8_load(encoding_info
->version
);
4333 if (IS_ERR(encoding
)) {
4335 "can't mount with superblock charset: %s-%u.%u.%u "
4336 "not supported by the kernel. flags: 0x%x.",
4337 encoding_info
->name
,
4338 unicode_major(encoding_info
->version
),
4339 unicode_minor(encoding_info
->version
),
4340 unicode_rev(encoding_info
->version
),
4342 return PTR_ERR(encoding
);
4344 f2fs_info(sbi
, "Using encoding defined by superblock: "
4345 "%s-%u.%u.%u with flags 0x%hx", encoding_info
->name
,
4346 unicode_major(encoding_info
->version
),
4347 unicode_minor(encoding_info
->version
),
4348 unicode_rev(encoding_info
->version
),
4351 sbi
->sb
->s_encoding
= encoding
;
4352 sbi
->sb
->s_encoding_flags
= encoding_flags
;
4355 if (f2fs_sb_has_casefold(sbi
)) {
4356 f2fs_err(sbi
, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
{
	/* adjust parameters according to the volume size */
	if (MAIN_SEGS(sbi) <= SMALL_VOLUME_SEGMENTS) {
		if (f2fs_block_unit_discard(sbi))
			SM_I(sbi)->dcc_info->discard_granularity =
						MIN_DISCARD_GRANULARITY;
		if (!f2fs_lfs_mode(sbi))
			SM_I(sbi)->ipu_policy = BIT(F2FS_IPU_FORCE) |
						BIT(F2FS_IPU_HONOR_OPU_WRITE);
	}

	sbi->readdir_ra = true;
}
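/*
 * Note (assumption, defined elsewhere): SMALL_VOLUME_SEGMENTS is on the order
 * of 16 * 512 segments, i.e. roughly a 16 GiB volume with 2 MiB segments, so
 * the tighter discard granularity and forced IPU policy above only apply to
 * small filesystems; larger volumes keep the defaults and only readdir_ra is
 * enabled.
 */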
4378 static int f2fs_fill_super(struct super_block
*sb
, void *data
, int silent
)
4380 struct f2fs_sb_info
*sbi
;
4381 struct f2fs_super_block
*raw_super
;
4384 bool skip_recovery
= false, need_fsck
= false;
4385 char *options
= NULL
;
4386 int recovery
, i
, valid_super_block
;
4387 struct curseg_info
*seg_i
;
4390 bool quota_enabled
= false;
4396 valid_super_block
= -1;
4399 /* allocate memory for f2fs-specific super block info */
4400 sbi
= kzalloc(sizeof(struct f2fs_sb_info
), GFP_KERNEL
);
4406 /* initialize locks within allocated memory */
4407 init_f2fs_rwsem(&sbi
->gc_lock
);
4408 mutex_init(&sbi
->writepages
);
4409 init_f2fs_rwsem(&sbi
->cp_global_sem
);
4410 init_f2fs_rwsem(&sbi
->node_write
);
4411 init_f2fs_rwsem(&sbi
->node_change
);
4412 spin_lock_init(&sbi
->stat_lock
);
4413 init_f2fs_rwsem(&sbi
->cp_rwsem
);
4414 init_f2fs_rwsem(&sbi
->quota_sem
);
4415 init_waitqueue_head(&sbi
->cp_wait
);
4416 spin_lock_init(&sbi
->error_lock
);
4418 for (i
= 0; i
< NR_INODE_TYPE
; i
++) {
4419 INIT_LIST_HEAD(&sbi
->inode_list
[i
]);
4420 spin_lock_init(&sbi
->inode_lock
[i
]);
4422 mutex_init(&sbi
->flush_lock
);
4424 /* Load the checksum driver */
4425 sbi
->s_chksum_driver
= crypto_alloc_shash("crc32", 0, 0);
4426 if (IS_ERR(sbi
->s_chksum_driver
)) {
4427 f2fs_err(sbi
, "Cannot load crc32 driver.");
4428 err
= PTR_ERR(sbi
->s_chksum_driver
);
4429 sbi
->s_chksum_driver
= NULL
;
4433 /* set a block size */
4434 if (unlikely(!sb_set_blocksize(sb
, F2FS_BLKSIZE
))) {
4435 f2fs_err(sbi
, "unable to set blocksize");
4439 err
= read_raw_super_block(sbi
, &raw_super
, &valid_super_block
,
4444 sb
->s_fs_info
= sbi
;
4445 sbi
->raw_super
= raw_super
;
4447 INIT_WORK(&sbi
->s_error_work
, f2fs_record_error_work
);
4448 memcpy(sbi
->errors
, raw_super
->s_errors
, MAX_F2FS_ERRORS
);
4449 memcpy(sbi
->stop_reason
, raw_super
->s_stop_reason
, MAX_STOP_REASON
);
4451 /* precompute checksum seed for metadata */
4452 if (f2fs_sb_has_inode_chksum(sbi
))
4453 sbi
->s_chksum_seed
= f2fs_chksum(sbi
, ~0, raw_super
->uuid
,
4454 sizeof(raw_super
->uuid
));
4456 default_options(sbi
, false);
4457 /* parse mount options */
4458 options
= kstrdup((const char *)data
, GFP_KERNEL
);
4459 if (data
&& !options
) {
4464 err
= parse_options(sb
, options
, false);
4468 sb
->s_maxbytes
= max_file_blocks(NULL
) <<
4469 le32_to_cpu(raw_super
->log_blocksize
);
4470 sb
->s_max_links
= F2FS_LINK_MAX
;
4472 err
= f2fs_setup_casefold(sbi
);
4477 sb
->dq_op
= &f2fs_quota_operations
;
4478 sb
->s_qcop
= &f2fs_quotactl_ops
;
4479 sb
->s_quota_types
= QTYPE_MASK_USR
| QTYPE_MASK_GRP
| QTYPE_MASK_PRJ
;
4481 if (f2fs_sb_has_quota_ino(sbi
)) {
4482 for (i
= 0; i
< MAXQUOTAS
; i
++) {
4483 if (f2fs_qf_ino(sbi
->sb
, i
))
4484 sbi
->nquota_files
++;
	sb->s_op = &f2fs_sops;
#ifdef CONFIG_FS_ENCRYPTION
	sb->s_cop = &f2fs_cryptops;
#endif
#ifdef CONFIG_FS_VERITY
	sb->s_vop = &f2fs_verityops;
#endif
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
	super_set_uuid(sb, (void *) raw_super->uuid, sizeof(raw_super->uuid));
	super_set_sysfs_name_bdev(sb);
	sb->s_iflags |= SB_I_CGROUPWB;
	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);

	err = f2fs_init_write_merge_io(sbi);
	if (err)
		goto free_bio_info;

	err = f2fs_init_iostat(sbi);
	if (err)
		goto free_bio_info;

	err = init_percpu_info(sbi);
	if (err)
		goto free_iostat;

	/* init per sbi slab cache */
	err = f2fs_init_xattr_caches(sbi);
	if (err)
		goto free_percpu;

	err = f2fs_init_page_array_cache(sbi);
	if (err)
		goto free_xattr_cache;
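	/*
	 * The meta inode's address space caches blocks read from the meta
	 * area (checkpoint, NAT, SIT and SSA), so it must exist before the
	 * checkpoint can be loaded below.
	 */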
	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_err(sbi, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_page_array_cache;
	}

	err = f2fs_get_valid_checkpoint(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}
	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_NEED_FSCK);
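	/*
	 * A superblock may describe a multi-device volume; open any
	 * additional block devices it lists before doing further I/O.
	 */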
	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to find devices");
		goto free_devices;
	}

	err = f2fs_init_post_read_wq(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize post read workqueue");
		goto free_devices;
	}
	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;
	sbi->current_reserved_blocks = 0;
	limit_reserve_root(sbi);
	adjust_unusable_cap_perc(sbi);

	f2fs_init_extent_cache_info(sbi);

	f2fs_init_ino_entry_info(sbi);

	f2fs_init_fsync_node_info(sbi);
	/* setup checkpoint request control and start checkpoint issue thread */
	f2fs_init_ckpt_req_control(sbi);
	if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) &&
			test_opt(sbi, MERGE_CHECKPOINT)) {
		err = f2fs_start_ckpt_thread(sbi);
		if (err) {
			f2fs_err(sbi,
				"Failed to start F2FS issue_checkpoint_thread (%d)",
				err);
			goto stop_ckpt_thread;
		}
	}
	/* setup f2fs internal modules */
	err = f2fs_build_segment_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
			 err);
		goto free_sm;
	}
	err = f2fs_build_node_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
			 err);
		goto free_nm;
	}

	/* For write statistics */
	sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	f2fs_build_gc_manager(sbi);

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_nm;
	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_err(sbi, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_stats;
	}

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_err(sbi, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
			!root->i_size || !root->i_nlink) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	generic_set_sb_d_ops(sb);
	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_node_inode;
	}
	err = f2fs_init_compress_inode(sbi);
	if (err)
		goto free_root_inode;

	err = f2fs_register_sysfs(sbi);
	if (err)
		goto free_compress_inode;

	/* Enable quota usage during mount */
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
		err = f2fs_enable_quotas(sb);
		if (err)
			f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
	}

	quota_enabled = f2fs_recover_quota_begin(sbi);
	/* if there are any orphan inodes, free them */
	err = f2fs_recover_orphan_inodes(sbi);
	if (err)
		goto free_meta;

	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
		goto reset_checkpoint;
	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
			!test_opt(sbi, NORECOVERY)) {
		/*
		 * mount should be failed, when device has readonly mode, and
		 * previous checkpoint was not done by clean system shutdown.
		 */
		if (f2fs_hw_is_readonly(sbi)) {
			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
				err = f2fs_recover_fsync_data(sbi, true);
				if (err > 0) {
					err = -EROFS;
					f2fs_err(sbi, "Need to recover fsync data, but "
						"write access unavailable, please try "
						"mount w/ disable_roll_forward or norecovery");
				}
				if (err < 0)
					goto free_meta;
			}
			f2fs_info(sbi, "write access unavailable, skipping recovery");
			goto reset_checkpoint;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (skip_recovery)
			goto reset_checkpoint;

		err = f2fs_recover_fsync_data(sbi, false);
		if (err < 0) {
			if (err != -ENOMEM)
				skip_recovery = true;
			need_fsck = true;
			f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
				 err);
			goto free_meta;
		}
	} else {
		err = f2fs_recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_err(sbi, "Need to recover fsync data");
			goto free_meta;
		}
	}

	f2fs_recover_quota_end(sbi, quota_enabled);
reset_checkpoint:
	/*
	 * If the f2fs is not readonly and fsync data recovery succeeds,
	 * check zoned block devices' write pointer consistency.
	 */
	if (f2fs_sb_has_blkzoned(sbi) && !f2fs_readonly(sb)) {
		int err2;

		f2fs_notice(sbi, "Checking entire write pointers");
		err2 = f2fs_check_write_pointer(sbi);
		if (err2)
			err = err2;
	}
	if (err)
		goto free_meta;

	err = f2fs_init_inmem_curseg(sbi);
	if (err)
		goto sync_free_meta;
	/* f2fs_recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = f2fs_disable_checkpoint(sbi);
		if (err)
			goto sync_free_meta;
	} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
		f2fs_enable_checkpoint(sbi);
	}
	/*
	 * If filesystem is not mounted as read-only then
	 * do start the gc_thread.
	 */
	if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF ||
		test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto sync_free_meta;
	}
	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
			  sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_join_shrinker(sbi);

	f2fs_tuning_parameters(sbi);

	f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
		    cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
	return 0;
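	/*
	 * Error paths: each label below releases the state set up after the
	 * point it is jumped to from, then falls through to undo the
	 * earlier initialization steps as well.
	 */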
sync_free_meta:
	/* safe to flush all the data */
	sync_filesystem(sbi->sb);
	retry_cnt = 0;

free_meta:
	f2fs_truncate_quota_inode_pages(sb);
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
		f2fs_quota_off_umount(sbi->sb);

	/*
	 * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
	 * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
	 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which
	 * falls into an infinite loop in f2fs_sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
	/* evict some inodes being cached by GC */
	evict_inodes(sb);
	f2fs_unregister_sysfs(sbi);
free_compress_inode:
	f2fs_destroy_compress_inode(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	f2fs_release_ino_entry(sbi, true);
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	iput(sbi->node_inode);
	sbi->node_inode = NULL;
free_stats:
	f2fs_destroy_stats(sbi);
free_nm:
	/* stop discard thread before destroying node manager */
	f2fs_stop_discard_thread(sbi);
	f2fs_destroy_node_manager(sbi);
free_sm:
	f2fs_destroy_segment_manager(sbi);
stop_ckpt_thread:
	f2fs_stop_ckpt_thread(sbi);
	/* flush s_error_work before sbi destroy */
	flush_work(&sbi->s_error_work);
	f2fs_destroy_post_read_wq(sbi);
free_devices:
	destroy_device_list(sbi);
	kvfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;
free_page_array_cache:
	f2fs_destroy_page_array_cache(sbi);
free_xattr_cache:
	f2fs_destroy_xattr_caches(sbi);
free_percpu:
	destroy_percpu_info(sbi);
free_iostat:
	f2fs_destroy_iostat(sbi);
free_bio_info:
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);

#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
	sb->s_encoding = NULL;
#endif
free_options:
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	kvfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);
	sb->s_fs_info = NULL;

	/* give only one another chance */
	if (retry_cnt > 0 && skip_recovery) {
		retry_cnt--;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
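/*
 * Tear-down entry point: stop the background GC and discard threads,
 * write a final checkpoint if the filesystem is dirty, then release the
 * superblock and its block devices.
 */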
static void kill_f2fs_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb->s_root) {
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_stop_gc_thread(sbi);
		f2fs_stop_discard_thread(sbi);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		/*
		 * latter evict_inode() can bypass checking and invalidating
		 * compress inode cache.
		 */
		if (test_opt(sbi, COMPRESS_CACHE))
			truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
#endif

		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			struct cp_control cpc = {
				.reason = CP_UMOUNT,
			};
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			f2fs_write_checkpoint(sbi, &cpc);
		}

		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
			sb->s_flags &= ~SB_RDONLY;
	}
	kill_block_super(sb);
	/* Release block devices last, after fscrypt_destroy_keyring(). */
	if (sbi) {
		destroy_device_list(sbi);
		kfree(sbi);
		sb->s_fs_info = NULL;
	}
}
static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("f2fs");
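/*
 * f2fs in-memory inodes come from a dedicated slab cache;
 * SLAB_RECLAIM_ACCOUNT and SLAB_ACCOUNT keep its usage visible to memory
 * reclaim and memcg accounting.
 */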
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	return f2fs_inode_cachep ? 0 : -ENOMEM;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
static int __init init_f2fs_fs(void)
{
	int err;

	err = init_inodecache();
	if (err)
		goto fail;
	err = f2fs_create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = f2fs_create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = f2fs_create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = f2fs_create_recovery_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_create_extent_cache();
	if (err)
		goto free_recovery_cache;
	err = f2fs_create_garbage_collection_cache();
	if (err)
		goto free_extent_cache;
	err = f2fs_init_sysfs();
	if (err)
		goto free_garbage_collection_cache;
	err = f2fs_init_shrinker();
	if (err)
		goto free_sysfs;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	f2fs_create_root_stats();
	err = f2fs_init_post_read_processing();
	if (err)
		goto free_root_stats;
	err = f2fs_init_iostat_processing();
	if (err)
		goto free_post_read;
	err = f2fs_init_bio_entry_cache();
	if (err)
		goto free_iostat;
	err = f2fs_init_bioset();
	if (err)
		goto free_bio_entry_cache;
	err = f2fs_init_compress_mempool();
	if (err)
		goto free_bioset;
	err = f2fs_init_compress_cache();
	if (err)
		goto free_compress_mempool;
	err = f2fs_create_casefold_cache();
	if (err)
		goto free_compress_cache;
	return 0;
free_compress_cache:
	f2fs_destroy_compress_cache();
free_compress_mempool:
	f2fs_destroy_compress_mempool();
free_bioset:
	f2fs_destroy_bioset();
free_bio_entry_cache:
	f2fs_destroy_bio_entry_cache();
free_iostat:
	f2fs_destroy_iostat_processing();
free_post_read:
	f2fs_destroy_post_read_processing();
free_root_stats:
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	f2fs_exit_shrinker();
free_sysfs:
	f2fs_exit_sysfs();
free_garbage_collection_cache:
	f2fs_destroy_garbage_collection_cache();
free_extent_cache:
	f2fs_destroy_extent_cache();
free_recovery_cache:
	f2fs_destroy_recovery_cache();
free_checkpoint_caches:
	f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
	f2fs_destroy_segment_manager_caches();
free_node_manager_caches:
	f2fs_destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}
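/* Module unload: undo init_f2fs_fs() in reverse order. */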
static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_casefold_cache();
	f2fs_destroy_compress_cache();
	f2fs_destroy_compress_mempool();
	f2fs_destroy_bioset();
	f2fs_destroy_bio_entry_cache();
	f2fs_destroy_iostat_processing();
	f2fs_destroy_post_read_processing();
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	f2fs_exit_shrinker();
	f2fs_exit_sysfs();
	f2fs_destroy_garbage_collection_cache();
	f2fs_destroy_extent_cache();
	f2fs_destroy_recovery_cache();
	f2fs_destroy_checkpoint_caches();
	f2fs_destroy_segment_manager_caches();
	f2fs_destroy_node_manager_caches();
	destroy_inodecache();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)
MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32");