1 /*
2 * linux/fs/ext4/super.c
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
9 * from
11 * linux/fs/minix/inode.c
13 * Copyright (C) 1991, 1992 Linus Torvalds
15 * Big-endian to little-endian byte-swapping/bitmaps by
16 * David S. Miller (davem@caip.rutgers.edu), 1995
19 #include <linux/module.h>
20 #include <linux/string.h>
21 #include <linux/fs.h>
22 #include <linux/time.h>
23 #include <linux/vmalloc.h>
24 #include <linux/jbd2.h>
25 #include <linux/slab.h>
26 #include <linux/init.h>
27 #include <linux/blkdev.h>
28 #include <linux/parser.h>
29 #include <linux/buffer_head.h>
30 #include <linux/exportfs.h>
31 #include <linux/vfs.h>
32 #include <linux/random.h>
33 #include <linux/mount.h>
34 #include <linux/namei.h>
35 #include <linux/quotaops.h>
36 #include <linux/seq_file.h>
37 #include <linux/proc_fs.h>
38 #include <linux/ctype.h>
39 #include <linux/log2.h>
40 #include <linux/crc16.h>
41 #include <asm/uaccess.h>
43 #include "ext4.h"
44 #include "ext4_jbd2.h"
45 #include "xattr.h"
46 #include "acl.h"
47 #include "mballoc.h"
49 #define CREATE_TRACE_POINTS
50 #include <trace/events/ext4.h>
52 struct proc_dir_entry *ext4_proc_root;
53 static struct kset *ext4_kset;
55 static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
56 unsigned long journal_devnum);
57 static int ext4_commit_super(struct super_block *sb, int sync);
58 static void ext4_mark_recovery_complete(struct super_block *sb,
59 struct ext4_super_block *es);
60 static void ext4_clear_journal_err(struct super_block *sb,
61 struct ext4_super_block *es);
62 static int ext4_sync_fs(struct super_block *sb, int wait);
63 static const char *ext4_decode_error(struct super_block *sb, int errno,
64 char nbuf[16]);
65 static int ext4_remount(struct super_block *sb, int *flags, char *data);
66 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
67 static int ext4_unfreeze(struct super_block *sb);
68 static void ext4_write_super(struct super_block *sb);
69 static int ext4_freeze(struct super_block *sb);
70 static int ext4_get_sb(struct file_system_type *fs_type, int flags,
71 const char *dev_name, void *data, struct vfsmount *mnt);
73 #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
74 static struct file_system_type ext3_fs_type = {
75 .owner = THIS_MODULE,
76 .name = "ext3",
77 .get_sb = ext4_get_sb,
78 .kill_sb = kill_block_super,
79 .fs_flags = FS_REQUIRES_DEV,
81 #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
82 #else
83 #define IS_EXT3_SB(sb) (0)
84 #endif
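/*
 * Note: the block group descriptor accessors below all follow one pattern.
 * Each field is stored as a low half plus an optional high half; the high
 * half is only meaningful when the descriptor size is at least
 * EXT4_MIN_DESC_SIZE_64BIT (the larger descriptors used with the 64-bit
 * feature), so the helpers fold in the *_hi bits only in that case.
 */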
86 ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
87 struct ext4_group_desc *bg)
89 return le32_to_cpu(bg->bg_block_bitmap_lo) |
90 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
91 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
94 ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
95 struct ext4_group_desc *bg)
97 return le32_to_cpu(bg->bg_inode_bitmap_lo) |
98 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
99 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
102 ext4_fsblk_t ext4_inode_table(struct super_block *sb,
103 struct ext4_group_desc *bg)
105 return le32_to_cpu(bg->bg_inode_table_lo) |
106 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
107 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
110 __u32 ext4_free_blks_count(struct super_block *sb,
111 struct ext4_group_desc *bg)
113 return le16_to_cpu(bg->bg_free_blocks_count_lo) |
114 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
115 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
118 __u32 ext4_free_inodes_count(struct super_block *sb,
119 struct ext4_group_desc *bg)
121 return le16_to_cpu(bg->bg_free_inodes_count_lo) |
122 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
123 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
126 __u32 ext4_used_dirs_count(struct super_block *sb,
127 struct ext4_group_desc *bg)
129 return le16_to_cpu(bg->bg_used_dirs_count_lo) |
130 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
131 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
134 __u32 ext4_itable_unused_count(struct super_block *sb,
135 struct ext4_group_desc *bg)
137 return le16_to_cpu(bg->bg_itable_unused_lo) |
138 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
139 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
142 void ext4_block_bitmap_set(struct super_block *sb,
143 struct ext4_group_desc *bg, ext4_fsblk_t blk)
145 bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
146 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
147 bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
150 void ext4_inode_bitmap_set(struct super_block *sb,
151 struct ext4_group_desc *bg, ext4_fsblk_t blk)
153 bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
154 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
155 bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
158 void ext4_inode_table_set(struct super_block *sb,
159 struct ext4_group_desc *bg, ext4_fsblk_t blk)
161 bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
162 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
163 bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
166 void ext4_free_blks_set(struct super_block *sb,
167 struct ext4_group_desc *bg, __u32 count)
169 bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
170 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
171 bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
174 void ext4_free_inodes_set(struct super_block *sb,
175 struct ext4_group_desc *bg, __u32 count)
177 bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
178 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
179 bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
182 void ext4_used_dirs_set(struct super_block *sb,
183 struct ext4_group_desc *bg, __u32 count)
185 bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
186 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
187 bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
190 void ext4_itable_unused_set(struct super_block *sb,
191 struct ext4_group_desc *bg, __u32 count)
193 bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
194 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
195 bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
199 /* Just increment the non-pointer handle value */
200 static handle_t *ext4_get_nojournal(void)
202 handle_t *handle = current->journal_info;
203 unsigned long ref_cnt = (unsigned long)handle;
205 BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT);
207 ref_cnt++;
208 handle = (handle_t *)ref_cnt;
210 current->journal_info = handle;
211 return handle;
215 /* Decrement the non-pointer handle value */
216 static void ext4_put_nojournal(handle_t *handle)
218 unsigned long ref_cnt = (unsigned long)handle;
220 BUG_ON(ref_cnt == 0);
222 ref_cnt--;
223 handle = (handle_t *)ref_cnt;
225 current->journal_info = handle;
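/*
 * Note: with no journal present, ext4 still tracks handle nesting in
 * current->journal_info, but stores a small reference count cast to a
 * handle_t * instead of a real jbd2 handle.  ext4_handle_valid() is what
 * lets callers such as __ext4_journal_stop() tell the two apart.
 */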
229 * Wrappers for jbd2_journal_start/end.
231 * The only special thing we need to do here is to make sure that all
232 * journal_end calls result in the superblock being marked dirty, so
233 * that sync() will call the filesystem's write_super callback if
234 * appropriate.
236 handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
238 journal_t *journal;
240 if (sb->s_flags & MS_RDONLY)
241 return ERR_PTR(-EROFS);
243 vfs_check_frozen(sb, SB_FREEZE_TRANS);
244 /* Special case here: if the journal has aborted behind our
245 * backs (eg. EIO in the commit thread), then we still need to
246 * take the FS itself readonly cleanly. */
247 journal = EXT4_SB(sb)->s_journal;
248 if (journal) {
249 if (is_journal_aborted(journal)) {
250 ext4_abort(sb, "Detected aborted journal");
251 return ERR_PTR(-EROFS);
253 return jbd2_journal_start(journal, nblocks);
255 return ext4_get_nojournal();
259 * The only special thing we need to do here is to make sure that all
260 * jbd2_journal_stop calls result in the superblock being marked dirty, so
261 * that sync() will call the filesystem's write_super callback if
262 * appropriate.
264 int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
266 struct super_block *sb;
267 int err;
268 int rc;
270 if (!ext4_handle_valid(handle)) {
271 ext4_put_nojournal(handle);
272 return 0;
274 sb = handle->h_transaction->t_journal->j_private;
275 err = handle->h_err;
276 rc = jbd2_journal_stop(handle);
278 if (!err)
279 err = rc;
280 if (err)
281 __ext4_std_error(sb, where, line, err);
282 return err;
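/*
 * Note on the error plumbing above: an error already recorded on the
 * handle (h_err) takes precedence over the jbd2_journal_stop() return
 * value, and whichever error wins is reported via __ext4_std_error().
 */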
285 void ext4_journal_abort_handle(const char *caller, unsigned int line,
286 const char *err_fn, struct buffer_head *bh,
287 handle_t *handle, int err)
289 char nbuf[16];
290 const char *errstr = ext4_decode_error(NULL, err, nbuf);
292 BUG_ON(!ext4_handle_valid(handle));
294 if (bh)
295 BUFFER_TRACE(bh, "abort");
297 if (!handle->h_err)
298 handle->h_err = err;
300 if (is_handle_aborted(handle))
301 return;
303 printk(KERN_ERR "%s:%d: aborting transaction: %s in %s\n",
304 caller, line, errstr, err_fn);
306 jbd2_journal_abort_handle(handle);
309 static void __save_error_info(struct super_block *sb, const char *func,
310 unsigned int line)
312 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
314 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
315 es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
316 es->s_last_error_time = cpu_to_le32(get_seconds());
317 strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
318 es->s_last_error_line = cpu_to_le32(line);
319 if (!es->s_first_error_time) {
320 es->s_first_error_time = es->s_last_error_time;
321 strncpy(es->s_first_error_func, func,
322 sizeof(es->s_first_error_func));
323 es->s_first_error_line = cpu_to_le32(line);
324 es->s_first_error_ino = es->s_last_error_ino;
325 es->s_first_error_block = es->s_last_error_block;
328 * Start the daily error reporting function if it hasn't been
329 * started already
331 if (!es->s_error_count)
332 mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
333 es->s_error_count = cpu_to_le32(le32_to_cpu(es->s_error_count) + 1);
336 static void save_error_info(struct super_block *sb, const char *func,
337 unsigned int line)
339 __save_error_info(sb, func, line);
340 ext4_commit_super(sb, 1);
344 /* Deal with the reporting of failure conditions on a filesystem such as
345 * inconsistencies detected or read IO failures.
347 * On ext2, we can store the error state of the filesystem in the
348 * superblock. That is not possible on ext4, because we may have other
349 * write ordering constraints on the superblock which prevent us from
350 * writing it out straight away; and given that the journal is about to
351 * be aborted, we can't rely on the current, or future, transactions to
352 * write out the superblock safely.
354 * We'll just use the jbd2_journal_abort() error code to record an error in
355 * the journal instead. On recovery, the journal will complain about
356 * that error until we've noted it down and cleared it.
359 static void ext4_handle_error(struct super_block *sb)
361 if (sb->s_flags & MS_RDONLY)
362 return;
364 if (!test_opt(sb, ERRORS_CONT)) {
365 journal_t *journal = EXT4_SB(sb)->s_journal;
367 EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
368 if (journal)
369 jbd2_journal_abort(journal, -EIO);
371 if (test_opt(sb, ERRORS_RO)) {
372 ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
373 sb->s_flags |= MS_RDONLY;
375 if (test_opt(sb, ERRORS_PANIC))
376 panic("EXT4-fs (device %s): panic forced after error\n",
377 sb->s_id);
380 void __ext4_error(struct super_block *sb, const char *function,
381 unsigned int line, const char *fmt, ...)
383 va_list args;
385 va_start(args, fmt);
386 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: ",
387 sb->s_id, function, line, current->comm);
388 vprintk(fmt, args);
389 printk("\n");
390 va_end(args);
392 ext4_handle_error(sb);
395 void ext4_error_inode(struct inode *inode, const char *function,
396 unsigned int line, ext4_fsblk_t block,
397 const char *fmt, ...)
399 va_list args;
400 struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
402 es->s_last_error_ino = cpu_to_le32(inode->i_ino);
403 es->s_last_error_block = cpu_to_le64(block);
404 save_error_info(inode->i_sb, function, line);
405 va_start(args, fmt);
406 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
407 inode->i_sb->s_id, function, line, inode->i_ino);
408 if (block)
409 printk("block %llu: ", block);
410 printk("comm %s: ", current->comm);
411 vprintk(fmt, args);
412 printk("\n");
413 va_end(args);
415 ext4_handle_error(inode->i_sb);
418 void ext4_error_file(struct file *file, const char *function,
419 unsigned int line, const char *fmt, ...)
421 va_list args;
422 struct ext4_super_block *es;
423 struct inode *inode = file->f_dentry->d_inode;
424 char pathname[80], *path;
426 es = EXT4_SB(inode->i_sb)->s_es;
427 es->s_last_error_ino = cpu_to_le32(inode->i_ino);
428 save_error_info(inode->i_sb, function, line);
429 va_start(args, fmt);
430 path = d_path(&(file->f_path), pathname, sizeof(pathname));
431 if (!path)
432 path = "(unknown)";
433 printk(KERN_CRIT
434 "EXT4-fs error (device %s): %s:%d: inode #%lu "
435 "(comm %s path %s): ",
436 inode->i_sb->s_id, function, line, inode->i_ino,
437 current->comm, path);
438 vprintk(fmt, args);
439 printk("\n");
440 va_end(args);
442 ext4_handle_error(inode->i_sb);
445 static const char *ext4_decode_error(struct super_block *sb, int errno,
446 char nbuf[16])
448 char *errstr = NULL;
450 switch (errno) {
451 case -EIO:
452 errstr = "IO failure";
453 break;
454 case -ENOMEM:
455 errstr = "Out of memory";
456 break;
457 case -EROFS:
458 if (!sb || (EXT4_SB(sb)->s_journal &&
459 EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
460 errstr = "Journal has aborted";
461 else
462 errstr = "Readonly filesystem";
463 break;
464 default:
465 /* If the caller passed in an extra buffer for unknown
466 * errors, textualise them now. Else we just return
467 * NULL. */
468 if (nbuf) {
469 /* Check for truncated error codes... */
470 if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
471 errstr = nbuf;
473 break;
476 return errstr;
479 /* __ext4_std_error decodes expected errors from journaling functions
480 * automatically and invokes the appropriate error response. */
482 void __ext4_std_error(struct super_block *sb, const char *function,
483 unsigned int line, int errno)
485 char nbuf[16];
486 const char *errstr;
488 /* Special case: if the error is EROFS, and we're not already
489 * inside a transaction, then there's really no point in logging
490 * an error. */
491 if (errno == -EROFS && journal_current_handle() == NULL &&
492 (sb->s_flags & MS_RDONLY))
493 return;
495 errstr = ext4_decode_error(sb, errno, nbuf);
496 printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
497 sb->s_id, function, line, errstr);
498 save_error_info(sb, function, line);
500 ext4_handle_error(sb);
504 * ext4_abort is a much stronger failure handler than ext4_error. The
505 * abort function may be used to deal with unrecoverable failures such
506 * as journal IO errors or ENOMEM at a critical moment in log management.
508 * We unconditionally force the filesystem into an ABORT|READONLY state,
509 * unless the error response on the fs has been set to panic in which
510 * case we take the easy way out and panic immediately.
513 void __ext4_abort(struct super_block *sb, const char *function,
514 unsigned int line, const char *fmt, ...)
516 va_list args;
518 save_error_info(sb, function, line);
519 va_start(args, fmt);
520 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: ", sb->s_id,
521 function, line);
522 vprintk(fmt, args);
523 printk("\n");
524 va_end(args);
526 if ((sb->s_flags & MS_RDONLY) == 0) {
527 ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
528 sb->s_flags |= MS_RDONLY;
529 EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
530 if (EXT4_SB(sb)->s_journal)
531 jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
532 save_error_info(sb, function, line);
534 if (test_opt(sb, ERRORS_PANIC))
535 panic("EXT4-fs panic from previous error\n");
538 void ext4_msg(struct super_block *sb, const char *prefix,
539 const char *fmt, ...)
541 va_list args;
543 va_start(args, fmt);
544 printk("%sEXT4-fs (%s): ", prefix, sb->s_id);
545 vprintk(fmt, args);
546 printk("\n");
547 va_end(args);
550 void __ext4_warning(struct super_block *sb, const char *function,
551 unsigned int line, const char *fmt, ...)
553 va_list args;
555 va_start(args, fmt);
556 printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: ",
557 sb->s_id, function, line);
558 vprintk(fmt, args);
559 printk("\n");
560 va_end(args);
563 void __ext4_grp_locked_error(const char *function, unsigned int line,
564 struct super_block *sb, ext4_group_t grp,
565 unsigned long ino, ext4_fsblk_t block,
566 const char *fmt, ...)
567 __releases(bitlock)
568 __acquires(bitlock)
570 va_list args;
571 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
573 es->s_last_error_ino = cpu_to_le32(ino);
574 es->s_last_error_block = cpu_to_le64(block);
575 __save_error_info(sb, function, line);
576 va_start(args, fmt);
577 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u",
578 sb->s_id, function, line, grp);
579 if (ino)
580 printk("inode %lu: ", ino);
581 if (block)
582 printk("block %llu:", (unsigned long long) block);
583 vprintk(fmt, args);
584 printk("\n");
585 va_end(args);
587 if (test_opt(sb, ERRORS_CONT)) {
588 ext4_commit_super(sb, 0);
589 return;
592 ext4_unlock_group(sb, grp);
593 ext4_handle_error(sb);
595 * We only get here in the ERRORS_RO case; relocking the group
596 * may be dangerous, but nothing bad will happen since the
597 * filesystem will have already been marked read/only and the
598 * journal has been aborted. We return 1 as a hint to callers
599  * who might want to use the return value from
600  * ext4_grp_locked_error() to distinguish between the
601 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
602 * aggressively from the ext4 function in question, with a
603 * more appropriate error code.
605 ext4_lock_group(sb, grp);
606 return;
609 void ext4_update_dynamic_rev(struct super_block *sb)
611 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
613 if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
614 return;
616 ext4_warning(sb,
617 "updating to rev %d because of new feature flag, "
618 "running e2fsck is recommended",
619 EXT4_DYNAMIC_REV);
621 es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
622 es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
623 es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
624 /* leave es->s_feature_*compat flags alone */
625 /* es->s_uuid will be set by e2fsck if empty */
628 * The rest of the superblock fields should be zero, and if not it
629 * means they are likely already in use, so leave them alone. We
630 * can leave it up to e2fsck to clean up any inconsistencies there.
635 * Open the external journal device
637 static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
639 struct block_device *bdev;
640 char b[BDEVNAME_SIZE];
642 bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
643 if (IS_ERR(bdev))
644 goto fail;
645 return bdev;
647 fail:
648 ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
649 __bdevname(dev, b), PTR_ERR(bdev));
650 return NULL;
654 * Release the journal device
656 static int ext4_blkdev_put(struct block_device *bdev)
658 bd_release(bdev);
659 return blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
662 static int ext4_blkdev_remove(struct ext4_sb_info *sbi)
664 struct block_device *bdev;
665 int ret = -ENODEV;
667 bdev = sbi->journal_bdev;
668 if (bdev) {
669 ret = ext4_blkdev_put(bdev);
670 sbi->journal_bdev = NULL;
672 return ret;
675 static inline struct inode *orphan_list_entry(struct list_head *l)
677 return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
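/*
 * list_entry() in orphan_list_entry() above is just container_of(): it
 * maps the i_orphan list head back to its enclosing ext4_inode_info and
 * returns the embedded VFS inode.
 */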
680 static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
682 struct list_head *l;
684 ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
685 le32_to_cpu(sbi->s_es->s_last_orphan));
687 printk(KERN_ERR "sb_info orphan list:\n");
688 list_for_each(l, &sbi->s_orphan) {
689 struct inode *inode = orphan_list_entry(l);
690 printk(KERN_ERR " "
691 "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
692 inode->i_sb->s_id, inode->i_ino, inode,
693 inode->i_mode, inode->i_nlink,
694 NEXT_ORPHAN(inode));
698 static void ext4_put_super(struct super_block *sb)
700 struct ext4_sb_info *sbi = EXT4_SB(sb);
701 struct ext4_super_block *es = sbi->s_es;
702 int i, err;
704 dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
706 flush_workqueue(sbi->dio_unwritten_wq);
707 destroy_workqueue(sbi->dio_unwritten_wq);
709 lock_super(sb);
710 if (sb->s_dirt)
711 ext4_commit_super(sb, 1);
713 if (sbi->s_journal) {
714 err = jbd2_journal_destroy(sbi->s_journal);
715 sbi->s_journal = NULL;
716 if (err < 0)
717 ext4_abort(sb, "Couldn't clean up the journal");
720 ext4_release_system_zone(sb);
721 ext4_mb_release(sb);
722 ext4_ext_release(sb);
723 ext4_xattr_put_super(sb);
725 if (!(sb->s_flags & MS_RDONLY)) {
726 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
727 es->s_state = cpu_to_le16(sbi->s_mount_state);
728 ext4_commit_super(sb, 1);
730 if (sbi->s_proc) {
731 remove_proc_entry(sb->s_id, ext4_proc_root);
733 kobject_del(&sbi->s_kobj);
735 for (i = 0; i < sbi->s_gdb_count; i++)
736 brelse(sbi->s_group_desc[i]);
737 kfree(sbi->s_group_desc);
738 if (is_vmalloc_addr(sbi->s_flex_groups))
739 vfree(sbi->s_flex_groups);
740 else
741 kfree(sbi->s_flex_groups);
742 percpu_counter_destroy(&sbi->s_freeblocks_counter);
743 percpu_counter_destroy(&sbi->s_freeinodes_counter);
744 percpu_counter_destroy(&sbi->s_dirs_counter);
745 percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
746 brelse(sbi->s_sbh);
747 #ifdef CONFIG_QUOTA
748 for (i = 0; i < MAXQUOTAS; i++)
749 kfree(sbi->s_qf_names[i]);
750 #endif
752 /* Debugging code just in case the in-memory inode orphan list
753 * isn't empty. The on-disk one can be non-empty if we've
754 * detected an error and taken the fs readonly, but the
755 * in-memory list had better be clean by this point. */
756 if (!list_empty(&sbi->s_orphan))
757 dump_orphan_list(sb, sbi);
758 J_ASSERT(list_empty(&sbi->s_orphan));
760 invalidate_bdev(sb->s_bdev);
761 if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
763 * Invalidate the journal device's buffers. We don't want them
764  * floating about in memory - the physical journal device may be
765 * hotswapped, and it breaks the `ro-after' testing code.
767 sync_blockdev(sbi->journal_bdev);
768 invalidate_bdev(sbi->journal_bdev);
769 ext4_blkdev_remove(sbi);
771 sb->s_fs_info = NULL;
773 * Now that we are completely done shutting down the
774 * superblock, we need to actually destroy the kobject.
776 unlock_super(sb);
777 kobject_put(&sbi->s_kobj);
778 wait_for_completion(&sbi->s_kobj_unregister);
779 kfree(sbi->s_blockgroup_lock);
780 kfree(sbi);
783 static struct kmem_cache *ext4_inode_cachep;
786 * Called inside transaction, so use GFP_NOFS
788 static struct inode *ext4_alloc_inode(struct super_block *sb)
790 struct ext4_inode_info *ei;
792 ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
793 if (!ei)
794 return NULL;
796 ei->vfs_inode.i_version = 1;
797 ei->vfs_inode.i_data.writeback_index = 0;
798 memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
799 INIT_LIST_HEAD(&ei->i_prealloc_list);
800 spin_lock_init(&ei->i_prealloc_lock);
802 * Note: We can be called before EXT4_SB(sb)->s_journal is set,
803 * therefore it can be null here. Don't check it, just initialize
804 * jinode.
806 jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode);
807 ei->i_reserved_data_blocks = 0;
808 ei->i_reserved_meta_blocks = 0;
809 ei->i_allocated_meta_blocks = 0;
810 ei->i_da_metadata_calc_len = 0;
811 ei->i_delalloc_reserved_flag = 0;
812 spin_lock_init(&(ei->i_block_reservation_lock));
813 #ifdef CONFIG_QUOTA
814 ei->i_reserved_quota = 0;
815 #endif
816 INIT_LIST_HEAD(&ei->i_completed_io_list);
817 spin_lock_init(&ei->i_completed_io_lock);
818 ei->cur_aio_dio = NULL;
819 ei->i_sync_tid = 0;
820 ei->i_datasync_tid = 0;
822 return &ei->vfs_inode;
825 static void ext4_destroy_inode(struct inode *inode)
827 if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
828 ext4_msg(inode->i_sb, KERN_ERR,
829 "Inode %lu (%p): orphan list check failed!",
830 inode->i_ino, EXT4_I(inode));
831 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
832 EXT4_I(inode), sizeof(struct ext4_inode_info),
833 true);
834 dump_stack();
836 kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
839 static void init_once(void *foo)
841 struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
843 INIT_LIST_HEAD(&ei->i_orphan);
844 #ifdef CONFIG_EXT4_FS_XATTR
845 init_rwsem(&ei->xattr_sem);
846 #endif
847 init_rwsem(&ei->i_data_sem);
848 inode_init_once(&ei->vfs_inode);
851 static int init_inodecache(void)
853 ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
854 sizeof(struct ext4_inode_info),
855 0, (SLAB_RECLAIM_ACCOUNT|
856 SLAB_MEM_SPREAD),
857 init_once);
858 if (ext4_inode_cachep == NULL)
859 return -ENOMEM;
860 return 0;
863 static void destroy_inodecache(void)
865 kmem_cache_destroy(ext4_inode_cachep);
868 void ext4_clear_inode(struct inode *inode)
870 invalidate_inode_buffers(inode);
871 end_writeback(inode);
872 dquot_drop(inode);
873 ext4_discard_preallocations(inode);
874 if (EXT4_JOURNAL(inode))
875 jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
876 &EXT4_I(inode)->jinode);
879 static inline void ext4_show_quota_options(struct seq_file *seq,
880 struct super_block *sb)
882 #if defined(CONFIG_QUOTA)
883 struct ext4_sb_info *sbi = EXT4_SB(sb);
885 if (sbi->s_jquota_fmt) {
886 char *fmtname = "";
888 switch (sbi->s_jquota_fmt) {
889 case QFMT_VFS_OLD:
890 fmtname = "vfsold";
891 break;
892 case QFMT_VFS_V0:
893 fmtname = "vfsv0";
894 break;
895 case QFMT_VFS_V1:
896 fmtname = "vfsv1";
897 break;
899 seq_printf(seq, ",jqfmt=%s", fmtname);
902 if (sbi->s_qf_names[USRQUOTA])
903 seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
905 if (sbi->s_qf_names[GRPQUOTA])
906 seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
908 if (test_opt(sb, USRQUOTA))
909 seq_puts(seq, ",usrquota");
911 if (test_opt(sb, GRPQUOTA))
912 seq_puts(seq, ",grpquota");
913 #endif
917 * Show an option if
918 * - it's set to a non-default value OR
919 * - if the per-sb default is different from the global default
921 static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
923 int def_errors;
924 unsigned long def_mount_opts;
925 struct super_block *sb = vfs->mnt_sb;
926 struct ext4_sb_info *sbi = EXT4_SB(sb);
927 struct ext4_super_block *es = sbi->s_es;
929 def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
930 def_errors = le16_to_cpu(es->s_errors);
932 if (sbi->s_sb_block != 1)
933 seq_printf(seq, ",sb=%llu", sbi->s_sb_block);
934 if (test_opt(sb, MINIX_DF))
935 seq_puts(seq, ",minixdf");
936 if (test_opt(sb, GRPID) && !(def_mount_opts & EXT4_DEFM_BSDGROUPS))
937 seq_puts(seq, ",grpid");
938 if (!test_opt(sb, GRPID) && (def_mount_opts & EXT4_DEFM_BSDGROUPS))
939 seq_puts(seq, ",nogrpid");
940 if (sbi->s_resuid != EXT4_DEF_RESUID ||
941 le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID) {
942 seq_printf(seq, ",resuid=%u", sbi->s_resuid);
944 if (sbi->s_resgid != EXT4_DEF_RESGID ||
945 le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID) {
946 seq_printf(seq, ",resgid=%u", sbi->s_resgid);
948 if (test_opt(sb, ERRORS_RO)) {
949 if (def_errors == EXT4_ERRORS_PANIC ||
950 def_errors == EXT4_ERRORS_CONTINUE) {
951 seq_puts(seq, ",errors=remount-ro");
954 if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
955 seq_puts(seq, ",errors=continue");
956 if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
957 seq_puts(seq, ",errors=panic");
958 if (test_opt(sb, NO_UID32) && !(def_mount_opts & EXT4_DEFM_UID16))
959 seq_puts(seq, ",nouid32");
960 if (test_opt(sb, DEBUG) && !(def_mount_opts & EXT4_DEFM_DEBUG))
961 seq_puts(seq, ",debug");
962 if (test_opt(sb, OLDALLOC))
963 seq_puts(seq, ",oldalloc");
964 #ifdef CONFIG_EXT4_FS_XATTR
965 if (test_opt(sb, XATTR_USER) &&
966 !(def_mount_opts & EXT4_DEFM_XATTR_USER))
967 seq_puts(seq, ",user_xattr");
968 if (!test_opt(sb, XATTR_USER) &&
969 (def_mount_opts & EXT4_DEFM_XATTR_USER)) {
970 seq_puts(seq, ",nouser_xattr");
972 #endif
973 #ifdef CONFIG_EXT4_FS_POSIX_ACL
974 if (test_opt(sb, POSIX_ACL) && !(def_mount_opts & EXT4_DEFM_ACL))
975 seq_puts(seq, ",acl");
976 if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT4_DEFM_ACL))
977 seq_puts(seq, ",noacl");
978 #endif
979 if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
980 seq_printf(seq, ",commit=%u",
981 (unsigned) (sbi->s_commit_interval / HZ));
983 if (sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) {
984 seq_printf(seq, ",min_batch_time=%u",
985 (unsigned) sbi->s_min_batch_time);
987 if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
988 seq_printf(seq, ",max_batch_time=%u",
989 (unsigned) sbi->s_max_batch_time);
993 * We're changing the default of barrier mount option, so
994 * let's always display its mount state so it's clear what its
995 * status is.
997 seq_puts(seq, ",barrier=");
998 seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
999 if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
1000 seq_puts(seq, ",journal_async_commit");
1001 else if (test_opt(sb, JOURNAL_CHECKSUM))
1002 seq_puts(seq, ",journal_checksum");
1003 if (test_opt(sb, I_VERSION))
1004 seq_puts(seq, ",i_version");
1005 if (!test_opt(sb, DELALLOC) &&
1006 !(def_mount_opts & EXT4_DEFM_NODELALLOC))
1007 seq_puts(seq, ",nodelalloc");
1009 if (sbi->s_stripe)
1010 seq_printf(seq, ",stripe=%lu", sbi->s_stripe);
1012  * journal mode can get enabled in different ways,
1013  * so just print the value even if we didn't specify it
1015 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
1016 seq_puts(seq, ",data=journal");
1017 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
1018 seq_puts(seq, ",data=ordered");
1019 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
1020 seq_puts(seq, ",data=writeback");
1022 if (sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
1023 seq_printf(seq, ",inode_readahead_blks=%u",
1024 sbi->s_inode_readahead_blks);
1026 if (test_opt(sb, DATA_ERR_ABORT))
1027 seq_puts(seq, ",data_err=abort");
1029 if (test_opt(sb, NO_AUTO_DA_ALLOC))
1030 seq_puts(seq, ",noauto_da_alloc");
1032 if (test_opt(sb, DISCARD) && !(def_mount_opts & EXT4_DEFM_DISCARD))
1033 seq_puts(seq, ",discard");
1035 if (test_opt(sb, NOLOAD))
1036 seq_puts(seq, ",norecovery");
1038 if (test_opt(sb, DIOREAD_NOLOCK))
1039 seq_puts(seq, ",dioread_nolock");
1041 if (test_opt(sb, BLOCK_VALIDITY) &&
1042 !(def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY))
1043 seq_puts(seq, ",block_validity");
1045 ext4_show_quota_options(seq, sb);
1047 return 0;
1050 static struct inode *ext4_nfs_get_inode(struct super_block *sb,
1051 u64 ino, u32 generation)
1053 struct inode *inode;
1055 if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
1056 return ERR_PTR(-ESTALE);
1057 if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
1058 return ERR_PTR(-ESTALE);
1060 /* iget isn't really right if the inode is currently unallocated!!
1062 * ext4_read_inode will return a bad_inode if the inode had been
1063 * deleted, so we should be safe.
1065 * Currently we don't know the generation for parent directory, so
1066 * a generation of 0 means "accept any"
1068 inode = ext4_iget(sb, ino);
1069 if (IS_ERR(inode))
1070 return ERR_CAST(inode);
1071 if (generation && inode->i_generation != generation) {
1072 iput(inode);
1073 return ERR_PTR(-ESTALE);
1076 return inode;
1079 static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
1080 int fh_len, int fh_type)
1082 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
1083 ext4_nfs_get_inode);
1086 static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
1087 int fh_len, int fh_type)
1089 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
1090 ext4_nfs_get_inode);
1094 * Try to release metadata pages (indirect blocks, directories) which are
1095 * mapped via the block device. Since these pages could have journal heads
1096 * which would prevent try_to_free_buffers() from freeing them, we must use
1097 * jbd2 layer's try_to_free_buffers() function to release them.
1099 static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
1100 gfp_t wait)
1102 journal_t *journal = EXT4_SB(sb)->s_journal;
1104 WARN_ON(PageChecked(page));
1105 if (!page_has_buffers(page))
1106 return 0;
1107 if (journal)
1108 return jbd2_journal_try_to_free_buffers(journal, page,
1109 wait & ~__GFP_WAIT);
1110 return try_to_free_buffers(page);
1113 #ifdef CONFIG_QUOTA
1114 #define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group")
1115 #define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
1117 static int ext4_write_dquot(struct dquot *dquot);
1118 static int ext4_acquire_dquot(struct dquot *dquot);
1119 static int ext4_release_dquot(struct dquot *dquot);
1120 static int ext4_mark_dquot_dirty(struct dquot *dquot);
1121 static int ext4_write_info(struct super_block *sb, int type);
1122 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
1123 char *path);
1124 static int ext4_quota_off(struct super_block *sb, int type);
1125 static int ext4_quota_on_mount(struct super_block *sb, int type);
1126 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
1127 size_t len, loff_t off);
1128 static ssize_t ext4_quota_write(struct super_block *sb, int type,
1129 const char *data, size_t len, loff_t off);
1131 static const struct dquot_operations ext4_quota_operations = {
1132 #ifdef CONFIG_QUOTA
1133 .get_reserved_space = ext4_get_reserved_space,
1134 #endif
1135 .write_dquot = ext4_write_dquot,
1136 .acquire_dquot = ext4_acquire_dquot,
1137 .release_dquot = ext4_release_dquot,
1138 .mark_dirty = ext4_mark_dquot_dirty,
1139 .write_info = ext4_write_info,
1140 .alloc_dquot = dquot_alloc,
1141 .destroy_dquot = dquot_destroy,
1144 static const struct quotactl_ops ext4_qctl_operations = {
1145 .quota_on = ext4_quota_on,
1146 .quota_off = ext4_quota_off,
1147 .quota_sync = dquot_quota_sync,
1148 .get_info = dquot_get_dqinfo,
1149 .set_info = dquot_set_dqinfo,
1150 .get_dqblk = dquot_get_dqblk,
1151 .set_dqblk = dquot_set_dqblk
1153 #endif
1155 static const struct super_operations ext4_sops = {
1156 .alloc_inode = ext4_alloc_inode,
1157 .destroy_inode = ext4_destroy_inode,
1158 .write_inode = ext4_write_inode,
1159 .dirty_inode = ext4_dirty_inode,
1160 .evict_inode = ext4_evict_inode,
1161 .put_super = ext4_put_super,
1162 .sync_fs = ext4_sync_fs,
1163 .freeze_fs = ext4_freeze,
1164 .unfreeze_fs = ext4_unfreeze,
1165 .statfs = ext4_statfs,
1166 .remount_fs = ext4_remount,
1167 .show_options = ext4_show_options,
1168 #ifdef CONFIG_QUOTA
1169 .quota_read = ext4_quota_read,
1170 .quota_write = ext4_quota_write,
1171 #endif
1172 .bdev_try_to_free_page = bdev_try_to_free_page,
1175 static const struct super_operations ext4_nojournal_sops = {
1176 .alloc_inode = ext4_alloc_inode,
1177 .destroy_inode = ext4_destroy_inode,
1178 .write_inode = ext4_write_inode,
1179 .dirty_inode = ext4_dirty_inode,
1180 .evict_inode = ext4_evict_inode,
1181 .write_super = ext4_write_super,
1182 .put_super = ext4_put_super,
1183 .statfs = ext4_statfs,
1184 .remount_fs = ext4_remount,
1185 .show_options = ext4_show_options,
1186 #ifdef CONFIG_QUOTA
1187 .quota_read = ext4_quota_read,
1188 .quota_write = ext4_quota_write,
1189 #endif
1190 .bdev_try_to_free_page = bdev_try_to_free_page,
1193 static const struct export_operations ext4_export_ops = {
1194 .fh_to_dentry = ext4_fh_to_dentry,
1195 .fh_to_parent = ext4_fh_to_parent,
1196 .get_parent = ext4_get_parent,
1199 enum {
1200 Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
1201 Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
1202 Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov,
1203 Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
1204 Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload, Opt_nobh, Opt_bh,
1205 Opt_commit, Opt_min_batch_time, Opt_max_batch_time,
1206 Opt_journal_update, Opt_journal_dev,
1207 Opt_journal_checksum, Opt_journal_async_commit,
1208 Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
1209 Opt_data_err_abort, Opt_data_err_ignore,
1210 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
1211 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
1212 Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
1213 Opt_resize, Opt_usrquota, Opt_grpquota, Opt_i_version,
1214 Opt_stripe, Opt_delalloc, Opt_nodelalloc,
1215 Opt_block_validity, Opt_noblock_validity,
1216 Opt_inode_readahead_blks, Opt_journal_ioprio,
1217 Opt_dioread_nolock, Opt_dioread_lock,
1218 Opt_discard, Opt_nodiscard,
1221 static const match_table_t tokens = {
1222 {Opt_bsd_df, "bsddf"},
1223 {Opt_minix_df, "minixdf"},
1224 {Opt_grpid, "grpid"},
1225 {Opt_grpid, "bsdgroups"},
1226 {Opt_nogrpid, "nogrpid"},
1227 {Opt_nogrpid, "sysvgroups"},
1228 {Opt_resgid, "resgid=%u"},
1229 {Opt_resuid, "resuid=%u"},
1230 {Opt_sb, "sb=%u"},
1231 {Opt_err_cont, "errors=continue"},
1232 {Opt_err_panic, "errors=panic"},
1233 {Opt_err_ro, "errors=remount-ro"},
1234 {Opt_nouid32, "nouid32"},
1235 {Opt_debug, "debug"},
1236 {Opt_oldalloc, "oldalloc"},
1237 {Opt_orlov, "orlov"},
1238 {Opt_user_xattr, "user_xattr"},
1239 {Opt_nouser_xattr, "nouser_xattr"},
1240 {Opt_acl, "acl"},
1241 {Opt_noacl, "noacl"},
1242 {Opt_noload, "noload"},
1243 {Opt_noload, "norecovery"},
1244 {Opt_nobh, "nobh"},
1245 {Opt_bh, "bh"},
1246 {Opt_commit, "commit=%u"},
1247 {Opt_min_batch_time, "min_batch_time=%u"},
1248 {Opt_max_batch_time, "max_batch_time=%u"},
1249 {Opt_journal_update, "journal=update"},
1250 {Opt_journal_dev, "journal_dev=%u"},
1251 {Opt_journal_checksum, "journal_checksum"},
1252 {Opt_journal_async_commit, "journal_async_commit"},
1253 {Opt_abort, "abort"},
1254 {Opt_data_journal, "data=journal"},
1255 {Opt_data_ordered, "data=ordered"},
1256 {Opt_data_writeback, "data=writeback"},
1257 {Opt_data_err_abort, "data_err=abort"},
1258 {Opt_data_err_ignore, "data_err=ignore"},
1259 {Opt_offusrjquota, "usrjquota="},
1260 {Opt_usrjquota, "usrjquota=%s"},
1261 {Opt_offgrpjquota, "grpjquota="},
1262 {Opt_grpjquota, "grpjquota=%s"},
1263 {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
1264 {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
1265 {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
1266 {Opt_grpquota, "grpquota"},
1267 {Opt_noquota, "noquota"},
1268 {Opt_quota, "quota"},
1269 {Opt_usrquota, "usrquota"},
1270 {Opt_barrier, "barrier=%u"},
1271 {Opt_barrier, "barrier"},
1272 {Opt_nobarrier, "nobarrier"},
1273 {Opt_i_version, "i_version"},
1274 {Opt_stripe, "stripe=%u"},
1275 {Opt_resize, "resize"},
1276 {Opt_delalloc, "delalloc"},
1277 {Opt_nodelalloc, "nodelalloc"},
1278 {Opt_block_validity, "block_validity"},
1279 {Opt_noblock_validity, "noblock_validity"},
1280 {Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
1281 {Opt_journal_ioprio, "journal_ioprio=%u"},
1282 {Opt_auto_da_alloc, "auto_da_alloc=%u"},
1283 {Opt_auto_da_alloc, "auto_da_alloc"},
1284 {Opt_noauto_da_alloc, "noauto_da_alloc"},
1285 {Opt_dioread_nolock, "dioread_nolock"},
1286 {Opt_dioread_lock, "dioread_lock"},
1287 {Opt_discard, "discard"},
1288 {Opt_nodiscard, "nodiscard"},
1289 {Opt_err, NULL},
1292 static ext4_fsblk_t get_sb_block(void **data)
1294 ext4_fsblk_t sb_block;
1295 char *options = (char *) *data;
1297 if (!options || strncmp(options, "sb=", 3) != 0)
1298 return 1; /* Default location */
1300 options += 3;
1301 /* TODO: use simple_strtoll with >32bit ext4 */
1302 sb_block = simple_strtoul(options, &options, 0);
1303 if (*options && *options != ',') {
1304 printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
1305 (char *) *data);
1306 return 1;
1308 if (*options == ',')
1309 options++;
1310 *data = (void *) options;
1312 return sb_block;
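/*
 * get_sb_block() above peels a leading "sb=<block>" off the mount option
 * string so an alternate superblock location can be chosen before the
 * rest of the options are parsed; it returns block 1 (the default
 * location) when no sb= option is given or the value is malformed.
 */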
1315 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
1316 static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
1317 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
1319 #ifdef CONFIG_QUOTA
1320 static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
1322 struct ext4_sb_info *sbi = EXT4_SB(sb);
1323 char *qname;
1325 if (sb_any_quota_loaded(sb) &&
1326 !sbi->s_qf_names[qtype]) {
1327 ext4_msg(sb, KERN_ERR,
1328 "Cannot change journaled "
1329 "quota options when quota turned on");
1330 return 0;
1332 qname = match_strdup(args);
1333 if (!qname) {
1334 ext4_msg(sb, KERN_ERR,
1335 "Not enough memory for storing quotafile name");
1336 return 0;
1338 if (sbi->s_qf_names[qtype] &&
1339 strcmp(sbi->s_qf_names[qtype], qname)) {
1340 ext4_msg(sb, KERN_ERR,
1341 "%s quota file already specified", QTYPE2NAME(qtype));
1342 kfree(qname);
1343 return 0;
1345 sbi->s_qf_names[qtype] = qname;
1346 if (strchr(sbi->s_qf_names[qtype], '/')) {
1347 ext4_msg(sb, KERN_ERR,
1348 "quotafile must be on filesystem root");
1349 kfree(sbi->s_qf_names[qtype]);
1350 sbi->s_qf_names[qtype] = NULL;
1351 return 0;
1353 set_opt(sbi->s_mount_opt, QUOTA);
1354 return 1;
1357 static int clear_qf_name(struct super_block *sb, int qtype)
1360 struct ext4_sb_info *sbi = EXT4_SB(sb);
1362 if (sb_any_quota_loaded(sb) &&
1363 sbi->s_qf_names[qtype]) {
1364 ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
1365 " when quota turned on");
1366 return 0;
1369 * The space will be released later when all options are confirmed
1370 * to be correct
1372 sbi->s_qf_names[qtype] = NULL;
1373 return 1;
1375 #endif
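/*
 * parse_options() returns 1 on success and 0 when an option is invalid
 * or unsupported for this configuration.
 */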
1377 static int parse_options(char *options, struct super_block *sb,
1378 unsigned long *journal_devnum,
1379 unsigned int *journal_ioprio,
1380 ext4_fsblk_t *n_blocks_count, int is_remount)
1382 struct ext4_sb_info *sbi = EXT4_SB(sb);
1383 char *p;
1384 substring_t args[MAX_OPT_ARGS];
1385 int data_opt = 0;
1386 int option;
1387 #ifdef CONFIG_QUOTA
1388 int qfmt;
1389 #endif
1391 if (!options)
1392 return 1;
1394 while ((p = strsep(&options, ",")) != NULL) {
1395 int token;
1396 if (!*p)
1397 continue;
1400 * Initialize args struct so we know whether arg was
1401 * found; some options take optional arguments.
1403 args[0].to = args[0].from = 0;
1404 token = match_token(p, tokens, args);
1405 switch (token) {
1406 case Opt_bsd_df:
1407 ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
1408 clear_opt(sbi->s_mount_opt, MINIX_DF);
1409 break;
1410 case Opt_minix_df:
1411 ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
1412 set_opt(sbi->s_mount_opt, MINIX_DF);
1414 break;
1415 case Opt_grpid:
1416 ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
1417 set_opt(sbi->s_mount_opt, GRPID);
1419 break;
1420 case Opt_nogrpid:
1421 ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
1422 clear_opt(sbi->s_mount_opt, GRPID);
1424 break;
1425 case Opt_resuid:
1426 if (match_int(&args[0], &option))
1427 return 0;
1428 sbi->s_resuid = option;
1429 break;
1430 case Opt_resgid:
1431 if (match_int(&args[0], &option))
1432 return 0;
1433 sbi->s_resgid = option;
1434 break;
1435 case Opt_sb:
1436 /* handled by get_sb_block() instead of here */
1437 /* *sb_block = match_int(&args[0]); */
1438 break;
1439 case Opt_err_panic:
1440 clear_opt(sbi->s_mount_opt, ERRORS_CONT);
1441 clear_opt(sbi->s_mount_opt, ERRORS_RO);
1442 set_opt(sbi->s_mount_opt, ERRORS_PANIC);
1443 break;
1444 case Opt_err_ro:
1445 clear_opt(sbi->s_mount_opt, ERRORS_CONT);
1446 clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
1447 set_opt(sbi->s_mount_opt, ERRORS_RO);
1448 break;
1449 case Opt_err_cont:
1450 clear_opt(sbi->s_mount_opt, ERRORS_RO);
1451 clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
1452 set_opt(sbi->s_mount_opt, ERRORS_CONT);
1453 break;
1454 case Opt_nouid32:
1455 set_opt(sbi->s_mount_opt, NO_UID32);
1456 break;
1457 case Opt_debug:
1458 set_opt(sbi->s_mount_opt, DEBUG);
1459 break;
1460 case Opt_oldalloc:
1461 set_opt(sbi->s_mount_opt, OLDALLOC);
1462 break;
1463 case Opt_orlov:
1464 clear_opt(sbi->s_mount_opt, OLDALLOC);
1465 break;
1466 #ifdef CONFIG_EXT4_FS_XATTR
1467 case Opt_user_xattr:
1468 set_opt(sbi->s_mount_opt, XATTR_USER);
1469 break;
1470 case Opt_nouser_xattr:
1471 clear_opt(sbi->s_mount_opt, XATTR_USER);
1472 break;
1473 #else
1474 case Opt_user_xattr:
1475 case Opt_nouser_xattr:
1476 ext4_msg(sb, KERN_ERR, "(no)user_xattr options not supported");
1477 break;
1478 #endif
1479 #ifdef CONFIG_EXT4_FS_POSIX_ACL
1480 case Opt_acl:
1481 set_opt(sbi->s_mount_opt, POSIX_ACL);
1482 break;
1483 case Opt_noacl:
1484 clear_opt(sbi->s_mount_opt, POSIX_ACL);
1485 break;
1486 #else
1487 case Opt_acl:
1488 case Opt_noacl:
1489 ext4_msg(sb, KERN_ERR, "(no)acl options not supported");
1490 break;
1491 #endif
1492 case Opt_journal_update:
1493 /* @@@ FIXME */
1494 /* Eventually we will want to be able to create
1495 a journal file here. For now, only allow the
1496 user to specify an existing inode to be the
1497 journal file. */
1498 if (is_remount) {
1499 ext4_msg(sb, KERN_ERR,
1500 "Cannot specify journal on remount");
1501 return 0;
1503 set_opt(sbi->s_mount_opt, UPDATE_JOURNAL);
1504 break;
1505 case Opt_journal_dev:
1506 if (is_remount) {
1507 ext4_msg(sb, KERN_ERR,
1508 "Cannot specify journal on remount");
1509 return 0;
1511 if (match_int(&args[0], &option))
1512 return 0;
1513 *journal_devnum = option;
1514 break;
1515 case Opt_journal_checksum:
1516 set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
1517 break;
1518 case Opt_journal_async_commit:
1519 set_opt(sbi->s_mount_opt, JOURNAL_ASYNC_COMMIT);
1520 set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
1521 break;
1522 case Opt_noload:
1523 set_opt(sbi->s_mount_opt, NOLOAD);
1524 break;
1525 case Opt_commit:
1526 if (match_int(&args[0], &option))
1527 return 0;
1528 if (option < 0)
1529 return 0;
1530 if (option == 0)
1531 option = JBD2_DEFAULT_MAX_COMMIT_AGE;
1532 sbi->s_commit_interval = HZ * option;
1533 break;
1534 case Opt_max_batch_time:
1535 if (match_int(&args[0], &option))
1536 return 0;
1537 if (option < 0)
1538 return 0;
1539 if (option == 0)
1540 option = EXT4_DEF_MAX_BATCH_TIME;
1541 sbi->s_max_batch_time = option;
1542 break;
1543 case Opt_min_batch_time:
1544 if (match_int(&args[0], &option))
1545 return 0;
1546 if (option < 0)
1547 return 0;
1548 sbi->s_min_batch_time = option;
1549 break;
1550 case Opt_data_journal:
1551 data_opt = EXT4_MOUNT_JOURNAL_DATA;
1552 goto datacheck;
1553 case Opt_data_ordered:
1554 data_opt = EXT4_MOUNT_ORDERED_DATA;
1555 goto datacheck;
1556 case Opt_data_writeback:
1557 data_opt = EXT4_MOUNT_WRITEBACK_DATA;
1558 datacheck:
1559 if (is_remount) {
1560 if (test_opt(sb, DATA_FLAGS) != data_opt) {
1561 ext4_msg(sb, KERN_ERR,
1562 "Cannot change data mode on remount");
1563 return 0;
1565 } else {
1566 clear_opt(sbi->s_mount_opt, DATA_FLAGS);
1567 sbi->s_mount_opt |= data_opt;
1569 break;
1570 case Opt_data_err_abort:
1571 set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
1572 break;
1573 case Opt_data_err_ignore:
1574 clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
1575 break;
1576 #ifdef CONFIG_QUOTA
1577 case Opt_usrjquota:
1578 if (!set_qf_name(sb, USRQUOTA, &args[0]))
1579 return 0;
1580 break;
1581 case Opt_grpjquota:
1582 if (!set_qf_name(sb, GRPQUOTA, &args[0]))
1583 return 0;
1584 break;
1585 case Opt_offusrjquota:
1586 if (!clear_qf_name(sb, USRQUOTA))
1587 return 0;
1588 break;
1589 case Opt_offgrpjquota:
1590 if (!clear_qf_name(sb, GRPQUOTA))
1591 return 0;
1592 break;
1594 case Opt_jqfmt_vfsold:
1595 qfmt = QFMT_VFS_OLD;
1596 goto set_qf_format;
1597 case Opt_jqfmt_vfsv0:
1598 qfmt = QFMT_VFS_V0;
1599 goto set_qf_format;
1600 case Opt_jqfmt_vfsv1:
1601 qfmt = QFMT_VFS_V1;
1602 set_qf_format:
1603 if (sb_any_quota_loaded(sb) &&
1604 sbi->s_jquota_fmt != qfmt) {
1605 ext4_msg(sb, KERN_ERR, "Cannot change "
1606 "journaled quota options when "
1607 "quota turned on");
1608 return 0;
1610 sbi->s_jquota_fmt = qfmt;
1611 break;
1612 case Opt_quota:
1613 case Opt_usrquota:
1614 set_opt(sbi->s_mount_opt, QUOTA);
1615 set_opt(sbi->s_mount_opt, USRQUOTA);
1616 break;
1617 case Opt_grpquota:
1618 set_opt(sbi->s_mount_opt, QUOTA);
1619 set_opt(sbi->s_mount_opt, GRPQUOTA);
1620 break;
1621 case Opt_noquota:
1622 if (sb_any_quota_loaded(sb)) {
1623 ext4_msg(sb, KERN_ERR, "Cannot change quota "
1624 "options when quota turned on");
1625 return 0;
1627 clear_opt(sbi->s_mount_opt, QUOTA);
1628 clear_opt(sbi->s_mount_opt, USRQUOTA);
1629 clear_opt(sbi->s_mount_opt, GRPQUOTA);
1630 break;
1631 #else
1632 case Opt_quota:
1633 case Opt_usrquota:
1634 case Opt_grpquota:
1635 ext4_msg(sb, KERN_ERR,
1636 "quota options not supported");
1637 break;
1638 case Opt_usrjquota:
1639 case Opt_grpjquota:
1640 case Opt_offusrjquota:
1641 case Opt_offgrpjquota:
1642 case Opt_jqfmt_vfsold:
1643 case Opt_jqfmt_vfsv0:
1644 case Opt_jqfmt_vfsv1:
1645 ext4_msg(sb, KERN_ERR,
1646 "journaled quota options not supported");
1647 break;
1648 case Opt_noquota:
1649 break;
1650 #endif
1651 case Opt_abort:
1652 sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
1653 break;
1654 case Opt_nobarrier:
1655 clear_opt(sbi->s_mount_opt, BARRIER);
1656 break;
1657 case Opt_barrier:
1658 if (args[0].from) {
1659 if (match_int(&args[0], &option))
1660 return 0;
1661 } else
1662 option = 1; /* No argument, default to 1 */
1663 if (option)
1664 set_opt(sbi->s_mount_opt, BARRIER);
1665 else
1666 clear_opt(sbi->s_mount_opt, BARRIER);
1667 break;
1668 case Opt_ignore:
1669 break;
1670 case Opt_resize:
1671 if (!is_remount) {
1672 ext4_msg(sb, KERN_ERR,
1673 "resize option only available "
1674 "for remount");
1675 return 0;
1677 if (match_int(&args[0], &option) != 0)
1678 return 0;
1679 *n_blocks_count = option;
1680 break;
1681 case Opt_nobh:
1682 ext4_msg(sb, KERN_WARNING,
1683 "Ignoring deprecated nobh option");
1684 break;
1685 case Opt_bh:
1686 ext4_msg(sb, KERN_WARNING,
1687 "Ignoring deprecated bh option");
1688 break;
1689 case Opt_i_version:
1690 set_opt(sbi->s_mount_opt, I_VERSION);
1691 sb->s_flags |= MS_I_VERSION;
1692 break;
1693 case Opt_nodelalloc:
1694 clear_opt(sbi->s_mount_opt, DELALLOC);
1695 break;
1696 case Opt_stripe:
1697 if (match_int(&args[0], &option))
1698 return 0;
1699 if (option < 0)
1700 return 0;
1701 sbi->s_stripe = option;
1702 break;
1703 case Opt_delalloc:
1704 set_opt(sbi->s_mount_opt, DELALLOC);
1705 break;
1706 case Opt_block_validity:
1707 set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
1708 break;
1709 case Opt_noblock_validity:
1710 clear_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
1711 break;
1712 case Opt_inode_readahead_blks:
1713 if (match_int(&args[0], &option))
1714 return 0;
1715 if (option < 0 || option > (1 << 30))
1716 return 0;
1717 if (!is_power_of_2(option)) {
1718 ext4_msg(sb, KERN_ERR,
1719 "EXT4-fs: inode_readahead_blks"
1720 " must be a power of 2");
1721 return 0;
1723 sbi->s_inode_readahead_blks = option;
1724 break;
1725 case Opt_journal_ioprio:
1726 if (match_int(&args[0], &option))
1727 return 0;
1728 if (option < 0 || option > 7)
1729 break;
1730 *journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE,
1731 option);
1732 break;
1733 case Opt_noauto_da_alloc:
1734 set_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
1735 break;
1736 case Opt_auto_da_alloc:
1737 if (args[0].from) {
1738 if (match_int(&args[0], &option))
1739 return 0;
1740 } else
1741 option = 1; /* No argument, default to 1 */
1742 if (option)
1743 clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
1744 else
1745 set_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
1746 break;
1747 case Opt_discard:
1748 set_opt(sbi->s_mount_opt, DISCARD);
1749 break;
1750 case Opt_nodiscard:
1751 clear_opt(sbi->s_mount_opt, DISCARD);
1752 break;
1753 case Opt_dioread_nolock:
1754 set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
1755 break;
1756 case Opt_dioread_lock:
1757 clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
1758 break;
1759 default:
1760 ext4_msg(sb, KERN_ERR,
1761 "Unrecognized mount option \"%s\" "
1762 "or missing value", p);
1763 return 0;
1766 #ifdef CONFIG_QUOTA
1767 if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
1768 if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
1769 clear_opt(sbi->s_mount_opt, USRQUOTA);
1771 if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
1772 clear_opt(sbi->s_mount_opt, GRPQUOTA);
1774 if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
1775 ext4_msg(sb, KERN_ERR, "old and new quota "
1776 "format mixing");
1777 return 0;
1780 if (!sbi->s_jquota_fmt) {
1781 ext4_msg(sb, KERN_ERR, "journaled quota format "
1782 "not specified");
1783 return 0;
1785 } else {
1786 if (sbi->s_jquota_fmt) {
1787 ext4_msg(sb, KERN_ERR, "journaled quota format "
1788 "specified with no journaling "
1789 "enabled");
1790 return 0;
1793 #endif
1794 return 1;
1797 static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
1798 int read_only)
1800 struct ext4_sb_info *sbi = EXT4_SB(sb);
1801 int res = 0;
1803 if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
1804 ext4_msg(sb, KERN_ERR, "revision level too high, "
1805 "forcing read-only mode");
1806 res = MS_RDONLY;
1808 if (read_only)
1809 return res;
1810 if (!(sbi->s_mount_state & EXT4_VALID_FS))
1811 ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
1812 "running e2fsck is recommended");
1813 else if ((sbi->s_mount_state & EXT4_ERROR_FS))
1814 ext4_msg(sb, KERN_WARNING,
1815 "warning: mounting fs with errors, "
1816 "running e2fsck is recommended");
1817 else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
1818 le16_to_cpu(es->s_mnt_count) >=
1819 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
1820 ext4_msg(sb, KERN_WARNING,
1821 "warning: maximal mount count reached, "
1822 "running e2fsck is recommended");
1823 else if (le32_to_cpu(es->s_checkinterval) &&
1824 (le32_to_cpu(es->s_lastcheck) +
1825 le32_to_cpu(es->s_checkinterval) <= get_seconds()))
1826 ext4_msg(sb, KERN_WARNING,
1827 "warning: checktime reached, "
1828 "running e2fsck is recommended");
1829 if (!sbi->s_journal)
1830 es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
1831 if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
1832 es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
1833 le16_add_cpu(&es->s_mnt_count, 1);
1834 es->s_mtime = cpu_to_le32(get_seconds());
1835 ext4_update_dynamic_rev(sb);
1836 if (sbi->s_journal)
1837 EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
1839 ext4_commit_super(sb, 1);
1840 if (test_opt(sb, DEBUG))
1841 printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
1842 "bpg=%lu, ipg=%lu, mo=%04x]\n",
1843 sb->s_blocksize,
1844 sbi->s_groups_count,
1845 EXT4_BLOCKS_PER_GROUP(sb),
1846 EXT4_INODES_PER_GROUP(sb),
1847 sbi->s_mount_opt);
1849 return res;
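/*
 * ext4_setup_super() returns 0 in the normal case and MS_RDONLY when the
 * on-disk revision level is higher than this code supports, allowing the
 * caller to force a read-only mount.
 */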
1852 static int ext4_fill_flex_info(struct super_block *sb)
1854 struct ext4_sb_info *sbi = EXT4_SB(sb);
1855 struct ext4_group_desc *gdp = NULL;
1856 ext4_group_t flex_group_count;
1857 ext4_group_t flex_group;
1858 int groups_per_flex = 0;
1859 size_t size;
1860 int i;
1862 sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
1863 groups_per_flex = 1 << sbi->s_log_groups_per_flex;
1865 if (groups_per_flex < 2) {
1866 sbi->s_log_groups_per_flex = 0;
1867 return 1;
1870 /* We allocate both existing and potentially added groups */
1871 flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
1872 ((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
1873 EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
1874 size = flex_group_count * sizeof(struct flex_groups);
1875 sbi->s_flex_groups = kzalloc(size, GFP_KERNEL);
1876 if (sbi->s_flex_groups == NULL) {
1877 sbi->s_flex_groups = vmalloc(size);
1878 if (sbi->s_flex_groups)
1879 memset(sbi->s_flex_groups, 0, size);
1881 if (sbi->s_flex_groups == NULL) {
1882 ext4_msg(sb, KERN_ERR, "not enough memory for "
1883 "%u flex groups", flex_group_count);
1884 goto failed;
1887 for (i = 0; i < sbi->s_groups_count; i++) {
1888 gdp = ext4_get_group_desc(sb, i, NULL);
1890 flex_group = ext4_flex_group(sbi, i);
1891 atomic_add(ext4_free_inodes_count(sb, gdp),
1892 &sbi->s_flex_groups[flex_group].free_inodes);
1893 atomic_add(ext4_free_blks_count(sb, gdp),
1894 &sbi->s_flex_groups[flex_group].free_blocks);
1895 atomic_add(ext4_used_dirs_count(sb, gdp),
1896 &sbi->s_flex_groups[flex_group].used_dirs);
1899 return 1;
1900 failed:
1901 return 0;
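/*
 * ext4_fill_flex_info() aggregates the per-group free block, free inode
 * and used directory counts into one flex_groups entry per
 * 2^s_log_groups_per_flex block groups (sized to also cover groups that
 * could be added by a later resize); it returns 1 on success and 0 on
 * allocation failure.
 */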
1904 __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
1905 struct ext4_group_desc *gdp)
1907 __u16 crc = 0;
1909 if (sbi->s_es->s_feature_ro_compat &
1910 cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
1911 int offset = offsetof(struct ext4_group_desc, bg_checksum);
1912 __le32 le_group = cpu_to_le32(block_group);
1914 crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
1915 crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
1916 crc = crc16(crc, (__u8 *)gdp, offset);
1917 offset += sizeof(gdp->bg_checksum); /* skip checksum */
1918 /* for checksum of struct ext4_group_desc do the rest...*/
1919 if ((sbi->s_es->s_feature_incompat &
1920 cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) &&
1921 offset < le16_to_cpu(sbi->s_es->s_desc_size))
1922 crc = crc16(crc, (__u8 *)gdp + offset,
1923 le16_to_cpu(sbi->s_es->s_desc_size) -
1924 offset);
1927 return cpu_to_le16(crc);
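/*
 * Descriptive note (added commentary, not in the original source): the
 * checksum above is a CRC-16 seeded with ~0 and run, in order, over the
 * filesystem UUID, the little-endian block group number, the descriptor
 * bytes up to (but not including) bg_checksum and, only when the 64BIT
 * feature makes descriptors larger than 32 bytes, the remaining bytes
 * after bg_checksum.
 */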
1930 int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group,
1931 struct ext4_group_desc *gdp)
1933 if ((sbi->s_es->s_feature_ro_compat &
1934 cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) &&
1935 (gdp->bg_checksum != ext4_group_desc_csum(sbi, block_group, gdp)))
1936 return 0;
1938 return 1;
1941 /* Called at mount-time, super-block is locked */
1942 static int ext4_check_descriptors(struct super_block *sb)
1944 struct ext4_sb_info *sbi = EXT4_SB(sb);
1945 ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
1946 ext4_fsblk_t last_block;
1947 ext4_fsblk_t block_bitmap;
1948 ext4_fsblk_t inode_bitmap;
1949 ext4_fsblk_t inode_table;
1950 int flexbg_flag = 0;
1951 ext4_group_t i;
1953 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
1954 flexbg_flag = 1;
1956 ext4_debug("Checking group descriptors");
1958 for (i = 0; i < sbi->s_groups_count; i++) {
1959 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
1961 if (i == sbi->s_groups_count - 1 || flexbg_flag)
1962 last_block = ext4_blocks_count(sbi->s_es) - 1;
1963 else
1964 last_block = first_block +
1965 (EXT4_BLOCKS_PER_GROUP(sb) - 1);
1967 block_bitmap = ext4_block_bitmap(sb, gdp);
1968 if (block_bitmap < first_block || block_bitmap > last_block) {
1969 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
1970 "Block bitmap for group %u not in group "
1971 "(block %llu)!", i, block_bitmap);
1972 return 0;
1974 inode_bitmap = ext4_inode_bitmap(sb, gdp);
1975 if (inode_bitmap < first_block || inode_bitmap > last_block) {
1976 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
1977 "Inode bitmap for group %u not in group "
1978 "(block %llu)!", i, inode_bitmap);
1979 return 0;
1981 inode_table = ext4_inode_table(sb, gdp);
1982 if (inode_table < first_block ||
1983 inode_table + sbi->s_itb_per_group - 1 > last_block) {
1984 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
1985 "Inode table for group %u not in group "
1986 "(block %llu)!", i, inode_table);
1987 return 0;
1989 ext4_lock_group(sb, i);
1990 if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
1991 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
1992 "Checksum for group %u failed (%u!=%u)",
1993 i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
1994 gdp)), le16_to_cpu(gdp->bg_checksum));
1995 if (!(sb->s_flags & MS_RDONLY)) {
1996 ext4_unlock_group(sb, i);
1997 return 0;
2000 ext4_unlock_group(sb, i);
2001 if (!flexbg_flag)
2002 first_block += EXT4_BLOCKS_PER_GROUP(sb);
2005 ext4_free_blocks_count_set(sbi->s_es, ext4_count_free_blocks(sb));
2006 sbi->s_es->s_free_inodes_count = cpu_to_le32(ext4_count_free_inodes(sb));
2007 return 1;
2010 /* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
2011 * the superblock) which were deleted from all directories, but held open by
2012 * a process at the time of a crash. We walk the list and try to delete these
2013 * inodes at recovery time (only with a read-write filesystem).
2015 * In order to keep the orphan inode chain consistent during traversal (in
2016 * case of crash during recovery), we link each inode into the superblock
2017 * orphan list_head and handle it the same way as an inode deletion during
2018 * normal operation (which journals the operations for us).
2020 * We only do an iget() and an iput() on each inode, which is very safe if we
2021 * accidentally point at an in-use or already deleted inode. The worst that
2022 * can happen in this case is that we get a "bit already cleared" message from
2023 * ext4_free_inode(). The only reason we would point at a wrong inode is if
2024 * e2fsck was run on this filesystem, and it must have already done the orphan
2025 * inode cleanup for us, so we can safely abort without any further action.
2027 static void ext4_orphan_cleanup(struct super_block *sb,
2028 struct ext4_super_block *es)
2030 unsigned int s_flags = sb->s_flags;
2031 int nr_orphans = 0, nr_truncates = 0;
2032 #ifdef CONFIG_QUOTA
2033 int i;
2034 #endif
2035 if (!es->s_last_orphan) {
2036 jbd_debug(4, "no orphan inodes to clean up\n");
2037 return;
2040 if (bdev_read_only(sb->s_bdev)) {
2041 ext4_msg(sb, KERN_ERR, "write access "
2042 "unavailable, skipping orphan cleanup");
2043 return;
2046 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
2047 if (es->s_last_orphan)
2048 jbd_debug(1, "Errors on filesystem, "
2049 "clearing orphan list.\n");
2050 es->s_last_orphan = 0;
2051 jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
2052 return;
2055 if (s_flags & MS_RDONLY) {
2056 ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
2057 sb->s_flags &= ~MS_RDONLY;
2059 #ifdef CONFIG_QUOTA
2060 /* Needed for iput() to work correctly and not trash data */
2061 sb->s_flags |= MS_ACTIVE;
2062 /* Turn on quotas so that they are updated correctly */
2063 for (i = 0; i < MAXQUOTAS; i++) {
2064 if (EXT4_SB(sb)->s_qf_names[i]) {
2065 int ret = ext4_quota_on_mount(sb, i);
2066 if (ret < 0)
2067 ext4_msg(sb, KERN_ERR,
2068 "Cannot turn on journaled "
2069 "quota: error %d", ret);
2072 #endif
2074 while (es->s_last_orphan) {
2075 struct inode *inode;
2077 inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
2078 if (IS_ERR(inode)) {
2079 es->s_last_orphan = 0;
2080 break;
2083 list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
2084 dquot_initialize(inode);
2085 if (inode->i_nlink) {
2086 ext4_msg(sb, KERN_DEBUG,
2087 "%s: truncating inode %lu to %lld bytes",
2088 __func__, inode->i_ino, inode->i_size);
2089 jbd_debug(2, "truncating inode %lu to %lld bytes\n",
2090 inode->i_ino, inode->i_size);
2091 ext4_truncate(inode);
2092 nr_truncates++;
2093 } else {
2094 ext4_msg(sb, KERN_DEBUG,
2095 "%s: deleting unreferenced inode %lu",
2096 __func__, inode->i_ino);
2097 jbd_debug(2, "deleting unreferenced inode %lu\n",
2098 inode->i_ino);
2099 nr_orphans++;
2101 iput(inode); /* The delete magic happens here! */
2104 #define PLURAL(x) (x), ((x) == 1) ? "" : "s"
2106 if (nr_orphans)
2107 ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
2108 PLURAL(nr_orphans));
2109 if (nr_truncates)
2110 ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
2111 PLURAL(nr_truncates));
2112 #ifdef CONFIG_QUOTA
2113 /* Turn quotas off */
2114 for (i = 0; i < MAXQUOTAS; i++) {
2115 if (sb_dqopt(sb)->files[i])
2116 dquot_quota_off(sb, i);
2118 #endif
2119 sb->s_flags = s_flags; /* Restore MS_RDONLY status */
2123 * Maximal extent format file size.
2124 * Resulting logical blkno at s_maxbytes must fit in our on-disk
2125 * extent format containers, within a sector_t, and within i_blocks
2126 * in the vfs. ext4 inode has 48 bits of i_block in fsblock units,
2127 * so that won't be a limiting factor.
2129 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
2131 static loff_t ext4_max_size(int blkbits, int has_huge_files)
2133 loff_t res;
2134 loff_t upper_limit = MAX_LFS_FILESIZE;
2136 /* small i_blocks in vfs inode? */
2137 if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
2139 * !has_huge_files or CONFIG_LBDAF not enabled implies that the
2140 * inode i_blocks field counts 512-byte sectors in a 32-bit value
2141 * (32 == sizeof(vfs inode i_blocks) * 8), so cap at 2^32 - 1
2143 upper_limit = (1LL << 32) - 1;
2145 /* total blocks in file system block size */
2146 upper_limit >>= (blkbits - 9);
2147 upper_limit <<= blkbits;
2150 /* 32-bit extent-start container, ee_block */
2151 res = 1LL << 32;
2152 res <<= blkbits;
2153 res -= 1;
2155 /* Sanity check against vm- & vfs- imposed limits */
2156 if (res > upper_limit)
2157 res = upper_limit;
2159 return res;
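/*
 * Worked example (added commentary, not in the original source): with
 * 4 KiB blocks (blkbits == 12) the extent-format limit is
 *	res = (2^32 << 12) - 1 = 2^44 - 1, just under 16 TiB.
 * Without the huge_file feature, or with a 32-bit blkcnt_t (CONFIG_LBDAF
 * off), upper_limit is ((2^32 - 1) >> 3) << 12, roughly 2 TiB, and that
 * smaller value is what ends up in sb->s_maxbytes.
 */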
2163 * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect
2164 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
2165 * We need to be 1 filesystem block less than the 2^48 sector limit.
2167 static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
2169 loff_t res = EXT4_NDIR_BLOCKS;
2170 int meta_blocks;
2171 loff_t upper_limit;
2172 /* This is calculated to be the largest file size for a dense, block
2173 * mapped file such that the file's total number of 512-byte sectors,
2174 * including data and all indirect blocks, does not exceed (2^48 - 1).
2176 * __u32 i_blocks_lo and __u16 i_blocks_high represent the total
2177 * number of 512-byte sectors of the file.
2180 if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
2182 * !has_huge_files or CONFIG_LBDAF not enabled implies that
2183 * the inode i_blocks field represents the file's total 512-byte
2184 * sectors in a 32-bit value (32 == sizeof(vfs inode i_blocks) * 8)
2186 upper_limit = (1LL << 32) - 1;
2188 /* total blocks in file system block size */
2189 upper_limit >>= (bits - 9);
2191 } else {
2193 * We use 48 bit ext4_inode i_blocks
2194 * With EXT4_HUGE_FILE_FL set the i_blocks
2195 * represent total number of blocks in
2196 * file system block size
2198 upper_limit = (1LL << 48) - 1;
2202 /* indirect blocks */
2203 meta_blocks = 1;
2204 /* double indirect blocks */
2205 meta_blocks += 1 + (1LL << (bits-2));
2206 /* triple indirect blocks */
2207 meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
2209 upper_limit -= meta_blocks;
2210 upper_limit <<= bits;
2212 res += 1LL << (bits-2);
2213 res += 1LL << (2*(bits-2));
2214 res += 1LL << (3*(bits-2));
2215 res <<= bits;
2216 if (res > upper_limit)
2217 res = upper_limit;
2219 if (res > MAX_LFS_FILESIZE)
2220 res = MAX_LFS_FILESIZE;
2222 return res;
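/*
 * Worked example (added commentary, not in the original source): for
 * 4 KiB blocks (bits == 12) the block-mapped data tree can address
 * 12 + 2^10 + 2^20 + 2^30 blocks, about 4 TiB of data.  Without the
 * huge_file feature the i_blocks sector count caps the file at
 * (((2^32 - 1) >> 3) - 1050627) << 12 bytes, a little under 2 TiB,
 * where 1050627 is the meta_blocks total computed above for bits == 12.
 * The smaller of the two limits wins.
 */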
2225 static ext4_fsblk_t descriptor_loc(struct super_block *sb,
2226 ext4_fsblk_t logical_sb_block, int nr)
2228 struct ext4_sb_info *sbi = EXT4_SB(sb);
2229 ext4_group_t bg, first_meta_bg;
2230 int has_super = 0;
2232 first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
2234 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
2235 nr < first_meta_bg)
2236 return logical_sb_block + nr + 1;
2237 bg = sbi->s_desc_per_block * nr;
2238 if (ext4_bg_has_super(sb, bg))
2239 has_super = 1;
2241 return (has_super + ext4_group_first_block_no(sb, bg));
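/*
 * Descriptive note (added commentary, not in the original source): without
 * META_BG, or for descriptor blocks below s_first_meta_bg, group descriptor
 * block nr simply follows the superblock at logical_sb_block + nr + 1.
 * With META_BG, descriptor block nr sits at the start of the first group of
 * the meta-group it describes (group nr * s_desc_per_block), skipping one
 * block when that group also carries a superblock backup.
 */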
2245 * ext4_get_stripe_size: Get the stripe size.
2246 * @sbi: In memory super block info
2248 * If a stripe size was specified via the mount option, use that
2249 * value. If the mount-time value is larger than the blocks per
2250 * group, fall back to the superblock values (stripe width first,
2251 * then stride). If those also exceed the blocks per group, return 0;
2252 * the allocator needs the stripe to be no larger than a block group.
2255 static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
2257 unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
2258 unsigned long stripe_width =
2259 le32_to_cpu(sbi->s_es->s_raid_stripe_width);
2261 if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
2262 return sbi->s_stripe;
2264 if (stripe_width <= sbi->s_blocks_per_group)
2265 return stripe_width;
2267 if (stride <= sbi->s_blocks_per_group)
2268 return stride;
2270 return 0;
2273 /* sysfs support */
2275 struct ext4_attr {
2276 struct attribute attr;
2277 ssize_t (*show)(struct ext4_attr *, struct ext4_sb_info *, char *);
2278 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
2279 const char *, size_t);
2280 int offset;
2283 static int parse_strtoul(const char *buf,
2284 unsigned long max, unsigned long *value)
2286 char *endp;
2288 *value = simple_strtoul(skip_spaces(buf), &endp, 0);
2289 endp = skip_spaces(endp);
2290 if (*endp || *value > max)
2291 return -EINVAL;
2293 return 0;
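/*
 * Usage sketch (added commentary, not in the original source):
 *
 *	unsigned long v;
 *	parse_strtoul("  32\n", 0x40000000, &v);  returns 0, v == 32
 *	parse_strtoul("32kb", 0x40000000, &v);    returns -EINVAL
 *
 * Leading and trailing whitespace is accepted; any other trailing text,
 * or a value above 'max', rejects the whole sysfs write.
 */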
2296 static ssize_t delayed_allocation_blocks_show(struct ext4_attr *a,
2297 struct ext4_sb_info *sbi,
2298 char *buf)
2300 return snprintf(buf, PAGE_SIZE, "%llu\n",
2301 (s64) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
2304 static ssize_t session_write_kbytes_show(struct ext4_attr *a,
2305 struct ext4_sb_info *sbi, char *buf)
2307 struct super_block *sb = sbi->s_buddy_cache->i_sb;
2309 if (!sb->s_bdev->bd_part)
2310 return snprintf(buf, PAGE_SIZE, "0\n");
2311 return snprintf(buf, PAGE_SIZE, "%lu\n",
2312 (part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
2313 sbi->s_sectors_written_start) >> 1);
2316 static ssize_t lifetime_write_kbytes_show(struct ext4_attr *a,
2317 struct ext4_sb_info *sbi, char *buf)
2319 struct super_block *sb = sbi->s_buddy_cache->i_sb;
2321 if (!sb->s_bdev->bd_part)
2322 return snprintf(buf, PAGE_SIZE, "0\n");
2323 return snprintf(buf, PAGE_SIZE, "%llu\n",
2324 (unsigned long long)(sbi->s_kbytes_written +
2325 ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
2326 EXT4_SB(sb)->s_sectors_written_start) >> 1)));
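/*
 * Descriptive note (added commentary, not in the original source):
 * part_stat_read(..., sectors[1]) is the cumulative count of 512-byte
 * sectors written to the partition, so ">> 1" converts sectors to KiB.
 * session_write_kbytes subtracts the count sampled at mount time
 * (s_sectors_written_start), while lifetime_write_kbytes also adds the
 * s_kbytes_written value carried in the on-disk superblock across mounts.
 */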
2329 static ssize_t inode_readahead_blks_store(struct ext4_attr *a,
2330 struct ext4_sb_info *sbi,
2331 const char *buf, size_t count)
2333 unsigned long t;
2335 if (parse_strtoul(buf, 0x40000000, &t))
2336 return -EINVAL;
2338 if (!is_power_of_2(t))
2339 return -EINVAL;
2341 sbi->s_inode_readahead_blks = t;
2342 return count;
2345 static ssize_t sbi_ui_show(struct ext4_attr *a,
2346 struct ext4_sb_info *sbi, char *buf)
2348 unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset);
2350 return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
2353 static ssize_t sbi_ui_store(struct ext4_attr *a,
2354 struct ext4_sb_info *sbi,
2355 const char *buf, size_t count)
2357 unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset);
2358 unsigned long t;
2360 if (parse_strtoul(buf, 0xffffffff, &t))
2361 return -EINVAL;
2362 *ui = t;
2363 return count;
2366 #define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \
2367 static struct ext4_attr ext4_attr_##_name = { \
2368 .attr = {.name = __stringify(_name), .mode = _mode }, \
2369 .show = _show, \
2370 .store = _store, \
2371 .offset = offsetof(struct ext4_sb_info, _elname), \
2373 #define EXT4_ATTR(name, mode, show, store) \
2374 static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store)
2376 #define EXT4_RO_ATTR(name) EXT4_ATTR(name, 0444, name##_show, NULL)
2377 #define EXT4_RW_ATTR(name) EXT4_ATTR(name, 0644, name##_show, name##_store)
2378 #define EXT4_RW_ATTR_SBI_UI(name, elname) \
2379 EXT4_ATTR_OFFSET(name, 0644, sbi_ui_show, sbi_ui_store, elname)
2380 #define ATTR_LIST(name) &ext4_attr_##name.attr
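/*
 * Expansion sketch (added commentary, not in the original source):
 * EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats), for example, defines
 *
 *	static struct ext4_attr ext4_attr_mb_stats = {
 *		.attr	= { .name = "mb_stats", .mode = 0644 },
 *		.show	= sbi_ui_show,
 *		.store	= sbi_ui_store,
 *		.offset	= offsetof(struct ext4_sb_info, s_mb_stats),
 *	};
 *
 * sbi_ui_show/sbi_ui_store then read or write the unsigned int at that
 * offset, and the attribute shows up as a file in the per-filesystem
 * sysfs directory (typically /sys/fs/ext4/<disk>/mb_stats) registered
 * later in ext4_fill_super().
 */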
2382 EXT4_RO_ATTR(delayed_allocation_blocks);
2383 EXT4_RO_ATTR(session_write_kbytes);
2384 EXT4_RO_ATTR(lifetime_write_kbytes);
2385 EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show,
2386 inode_readahead_blks_store, s_inode_readahead_blks);
2387 EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
2388 EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats);
2389 EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
2390 EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
2391 EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
2392 EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
2393 EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
2394 EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
2396 static struct attribute *ext4_attrs[] = {
2397 ATTR_LIST(delayed_allocation_blocks),
2398 ATTR_LIST(session_write_kbytes),
2399 ATTR_LIST(lifetime_write_kbytes),
2400 ATTR_LIST(inode_readahead_blks),
2401 ATTR_LIST(inode_goal),
2402 ATTR_LIST(mb_stats),
2403 ATTR_LIST(mb_max_to_scan),
2404 ATTR_LIST(mb_min_to_scan),
2405 ATTR_LIST(mb_order2_req),
2406 ATTR_LIST(mb_stream_req),
2407 ATTR_LIST(mb_group_prealloc),
2408 ATTR_LIST(max_writeback_mb_bump),
2409 NULL,
2412 static ssize_t ext4_attr_show(struct kobject *kobj,
2413 struct attribute *attr, char *buf)
2415 struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
2416 s_kobj);
2417 struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
2419 return a->show ? a->show(a, sbi, buf) : 0;
2422 static ssize_t ext4_attr_store(struct kobject *kobj,
2423 struct attribute *attr,
2424 const char *buf, size_t len)
2426 struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
2427 s_kobj);
2428 struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
2430 return a->store ? a->store(a, sbi, buf, len) : 0;
2433 static void ext4_sb_release(struct kobject *kobj)
2435 struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
2436 s_kobj);
2437 complete(&sbi->s_kobj_unregister);
2441 static const struct sysfs_ops ext4_attr_ops = {
2442 .show = ext4_attr_show,
2443 .store = ext4_attr_store,
2446 static struct kobj_type ext4_ktype = {
2447 .default_attrs = ext4_attrs,
2448 .sysfs_ops = &ext4_attr_ops,
2449 .release = ext4_sb_release,
2453 * Check whether this filesystem can be mounted based on
2454 * the features present and the RDONLY/RDWR mount requested.
2455 * Returns 1 if this filesystem can be mounted as requested,
2456 * 0 if it cannot be.
2458 static int ext4_feature_set_ok(struct super_block *sb, int readonly)
2460 if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT4_FEATURE_INCOMPAT_SUPP)) {
2461 ext4_msg(sb, KERN_ERR,
2462 "Couldn't mount because of "
2463 "unsupported optional features (%x)",
2464 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
2465 ~EXT4_FEATURE_INCOMPAT_SUPP));
2466 return 0;
2469 if (readonly)
2470 return 1;
2472 /* Check that feature set is OK for a read-write mount */
2473 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP)) {
2474 ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
2475 "unsupported optional features (%x)",
2476 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
2477 ~EXT4_FEATURE_RO_COMPAT_SUPP));
2478 return 0;
2481 * Large file size enabled file system can only be mounted
2482 * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF
2484 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
2485 if (sizeof(blkcnt_t) < sizeof(u64)) {
2486 ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
2487 "cannot be mounted RDWR without "
2488 "CONFIG_LBDAF");
2489 return 0;
2492 return 1;
2496 * This function is called once a day if we have errors logged
2497 * on the file system
2499 static void print_daily_error_info(unsigned long arg)
2501 struct super_block *sb = (struct super_block *) arg;
2502 struct ext4_sb_info *sbi;
2503 struct ext4_super_block *es;
2505 sbi = EXT4_SB(sb);
2506 es = sbi->s_es;
2508 if (es->s_error_count)
2509 ext4_msg(sb, KERN_NOTICE, "error count: %u",
2510 le32_to_cpu(es->s_error_count));
2511 if (es->s_first_error_time) {
2512 printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d",
2513 sb->s_id, le32_to_cpu(es->s_first_error_time),
2514 (int) sizeof(es->s_first_error_func),
2515 es->s_first_error_func,
2516 le32_to_cpu(es->s_first_error_line));
2517 if (es->s_first_error_ino)
2518 printk(": inode %u",
2519 le32_to_cpu(es->s_first_error_ino));
2520 if (es->s_first_error_block)
2521 printk(": block %llu", (unsigned long long)
2522 le64_to_cpu(es->s_first_error_block));
2523 printk("\n");
2525 if (es->s_last_error_time) {
2526 printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d",
2527 sb->s_id, le32_to_cpu(es->s_last_error_time),
2528 (int) sizeof(es->s_last_error_func),
2529 es->s_last_error_func,
2530 le32_to_cpu(es->s_last_error_line));
2531 if (es->s_last_error_ino)
2532 printk(": inode %u",
2533 le32_to_cpu(es->s_last_error_ino));
2534 if (es->s_last_error_block)
2535 printk(": block %llu", (unsigned long long)
2536 le64_to_cpu(es->s_last_error_block));
2537 printk("\n");
2539 mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */
2542 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2543 __releases(kernel_lock)
2544 __acquires(kernel_lock)
2546 char *orig_data = kstrdup(data, GFP_KERNEL);
2547 struct buffer_head *bh;
2548 struct ext4_super_block *es = NULL;
2549 struct ext4_sb_info *sbi;
2550 ext4_fsblk_t block;
2551 ext4_fsblk_t sb_block = get_sb_block(&data);
2552 ext4_fsblk_t logical_sb_block;
2553 unsigned long offset = 0;
2554 unsigned long journal_devnum = 0;
2555 unsigned long def_mount_opts;
2556 struct inode *root;
2557 char *cp;
2558 const char *descr;
2559 int ret = -ENOMEM;
2560 int blocksize;
2561 unsigned int db_count;
2562 unsigned int i;
2563 int needs_recovery, has_huge_files;
2564 __u64 blocks_count;
2565 int err;
2566 unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
2568 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
2569 if (!sbi)
2570 goto out_free_orig;
2572 sbi->s_blockgroup_lock =
2573 kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
2574 if (!sbi->s_blockgroup_lock) {
2575 kfree(sbi);
2576 goto out_free_orig;
2578 sb->s_fs_info = sbi;
2579 sbi->s_mount_opt = 0;
2580 sbi->s_resuid = EXT4_DEF_RESUID;
2581 sbi->s_resgid = EXT4_DEF_RESGID;
2582 sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
2583 sbi->s_sb_block = sb_block;
2584 if (sb->s_bdev->bd_part)
2585 sbi->s_sectors_written_start =
2586 part_stat_read(sb->s_bdev->bd_part, sectors[1]);
2588 /* Cleanup superblock name */
2589 for (cp = sb->s_id; (cp = strchr(cp, '/'));)
2590 *cp = '!';
2592 ret = -EINVAL;
2593 blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
2594 if (!blocksize) {
2595 ext4_msg(sb, KERN_ERR, "unable to set blocksize");
2596 goto out_fail;
2600 * The ext4 superblock will not be buffer aligned for other than 1kB
2601 * block sizes. We need to calculate the offset from buffer start.
2603 if (blocksize != EXT4_MIN_BLOCK_SIZE) {
2604 logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
2605 offset = do_div(logical_sb_block, blocksize);
2606 } else {
2607 logical_sb_block = sb_block;
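/*
 * Worked example (added commentary, not in the original source): the
 * primary superblock lives at byte offset sb_block * 1024 (1024 bytes
 * into the device for the default sb_block of 1).  With a 4 KiB block
 * size the do_div() above therefore yields logical_sb_block == 0 and
 * offset == 1024 within that block.
 */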
2610 if (!(bh = sb_bread(sb, logical_sb_block))) {
2611 ext4_msg(sb, KERN_ERR, "unable to read superblock");
2612 goto out_fail;
2615 * Note: s_es must be initialized as soon as possible because
2616 * some ext4 macro-instructions depend on its value
2618 es = (struct ext4_super_block *) (((char *)bh->b_data) + offset);
2619 sbi->s_es = es;
2620 sb->s_magic = le16_to_cpu(es->s_magic);
2621 if (sb->s_magic != EXT4_SUPER_MAGIC)
2622 goto cantfind_ext4;
2623 sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
2625 /* Set defaults before we parse the mount options */
2626 def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
2627 if (def_mount_opts & EXT4_DEFM_DEBUG)
2628 set_opt(sbi->s_mount_opt, DEBUG);
2629 if (def_mount_opts & EXT4_DEFM_BSDGROUPS) {
2630 ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups",
2631 "2.6.38");
2632 set_opt(sbi->s_mount_opt, GRPID);
2634 if (def_mount_opts & EXT4_DEFM_UID16)
2635 set_opt(sbi->s_mount_opt, NO_UID32);
2636 #ifdef CONFIG_EXT4_FS_XATTR
2637 if (def_mount_opts & EXT4_DEFM_XATTR_USER)
2638 set_opt(sbi->s_mount_opt, XATTR_USER);
2639 #endif
2640 #ifdef CONFIG_EXT4_FS_POSIX_ACL
2641 if (def_mount_opts & EXT4_DEFM_ACL)
2642 set_opt(sbi->s_mount_opt, POSIX_ACL);
2643 #endif
2644 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
2645 set_opt(sbi->s_mount_opt, JOURNAL_DATA);
2646 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
2647 set_opt(sbi->s_mount_opt, ORDERED_DATA);
2648 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
2649 set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
2651 if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
2652 set_opt(sbi->s_mount_opt, ERRORS_PANIC);
2653 else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
2654 set_opt(sbi->s_mount_opt, ERRORS_CONT);
2655 else
2656 set_opt(sbi->s_mount_opt, ERRORS_RO);
2657 if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY)
2658 set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
2659 if (def_mount_opts & EXT4_DEFM_DISCARD)
2660 set_opt(sbi->s_mount_opt, DISCARD);
2662 sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
2663 sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
2664 sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
2665 sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
2666 sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
2668 if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
2669 set_opt(sbi->s_mount_opt, BARRIER);
2672 * enable delayed allocation by default
2673 * Use -o nodelalloc to turn it off
2675 if (!IS_EXT3_SB(sb) &&
2676 ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
2677 set_opt(sbi->s_mount_opt, DELALLOC);
2679 if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
2680 &journal_devnum, &journal_ioprio, NULL, 0)) {
2681 ext4_msg(sb, KERN_WARNING,
2682 "failed to parse options in superblock: %s",
2683 sbi->s_es->s_mount_opts);
2685 if (!parse_options((char *) data, sb, &journal_devnum,
2686 &journal_ioprio, NULL, 0))
2687 goto failed_mount;
2689 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
2690 (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
2692 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
2693 (EXT4_HAS_COMPAT_FEATURE(sb, ~0U) ||
2694 EXT4_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
2695 EXT4_HAS_INCOMPAT_FEATURE(sb, ~0U)))
2696 ext4_msg(sb, KERN_WARNING,
2697 "feature flags set on rev 0 fs, "
2698 "running e2fsck is recommended");
2701 * Check feature flags regardless of the revision level, since we
2702 * previously didn't change the revision level when setting the flags,
2703 * so there is a chance incompat flags are set on a rev 0 filesystem.
2705 if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
2706 goto failed_mount;
2708 blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
2710 if (blocksize < EXT4_MIN_BLOCK_SIZE ||
2711 blocksize > EXT4_MAX_BLOCK_SIZE) {
2712 ext4_msg(sb, KERN_ERR,
2713 "Unsupported filesystem blocksize %d", blocksize);
2714 goto failed_mount;
2717 if (sb->s_blocksize != blocksize) {
2718 /* Validate the filesystem blocksize */
2719 if (!sb_set_blocksize(sb, blocksize)) {
2720 ext4_msg(sb, KERN_ERR, "bad block size %d",
2721 blocksize);
2722 goto failed_mount;
2725 brelse(bh);
2726 logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
2727 offset = do_div(logical_sb_block, blocksize);
2728 bh = sb_bread(sb, logical_sb_block);
2729 if (!bh) {
2730 ext4_msg(sb, KERN_ERR,
2731 "Can't read superblock on 2nd try");
2732 goto failed_mount;
2734 es = (struct ext4_super_block *)(((char *)bh->b_data) + offset);
2735 sbi->s_es = es;
2736 if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
2737 ext4_msg(sb, KERN_ERR,
2738 "Magic mismatch, very weird!");
2739 goto failed_mount;
2743 has_huge_files = EXT4_HAS_RO_COMPAT_FEATURE(sb,
2744 EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
2745 sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
2746 has_huge_files);
2747 sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
2749 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
2750 sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
2751 sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
2752 } else {
2753 sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
2754 sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
2755 if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
2756 (!is_power_of_2(sbi->s_inode_size)) ||
2757 (sbi->s_inode_size > blocksize)) {
2758 ext4_msg(sb, KERN_ERR,
2759 "unsupported inode size: %d",
2760 sbi->s_inode_size);
2761 goto failed_mount;
2763 if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
2764 sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
2767 sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
2768 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) {
2769 if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
2770 sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
2771 !is_power_of_2(sbi->s_desc_size)) {
2772 ext4_msg(sb, KERN_ERR,
2773 "unsupported descriptor size %lu",
2774 sbi->s_desc_size);
2775 goto failed_mount;
2777 } else
2778 sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
2780 sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
2781 sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
2782 if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
2783 goto cantfind_ext4;
2785 sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
2786 if (sbi->s_inodes_per_block == 0)
2787 goto cantfind_ext4;
2788 sbi->s_itb_per_group = sbi->s_inodes_per_group /
2789 sbi->s_inodes_per_block;
2790 sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
2791 sbi->s_sbh = bh;
2792 sbi->s_mount_state = le16_to_cpu(es->s_state);
2793 sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
2794 sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
2796 for (i = 0; i < 4; i++)
2797 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
2798 sbi->s_def_hash_version = es->s_def_hash_version;
2799 i = le32_to_cpu(es->s_flags);
2800 if (i & EXT2_FLAGS_UNSIGNED_HASH)
2801 sbi->s_hash_unsigned = 3;
2802 else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
2803 #ifdef __CHAR_UNSIGNED__
2804 es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
2805 sbi->s_hash_unsigned = 3;
2806 #else
2807 es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
2808 #endif
2809 sb->s_dirt = 1;
2812 if (sbi->s_blocks_per_group > blocksize * 8) {
2813 ext4_msg(sb, KERN_ERR,
2814 "#blocks per group too big: %lu",
2815 sbi->s_blocks_per_group);
2816 goto failed_mount;
2818 if (sbi->s_inodes_per_group > blocksize * 8) {
2819 ext4_msg(sb, KERN_ERR,
2820 "#inodes per group too big: %lu",
2821 sbi->s_inodes_per_group);
2822 goto failed_mount;
2826 * Test whether we have more sectors than will fit in sector_t,
2827 * and whether the max offset is addressable by the page cache.
2829 ret = generic_check_addressable(sb->s_blocksize_bits,
2830 ext4_blocks_count(es));
2831 if (ret) {
2832 ext4_msg(sb, KERN_ERR, "filesystem"
2833 " too large to mount safely on this system");
2834 if (sizeof(sector_t) < 8)
2835 ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
2836 goto failed_mount;
2839 if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
2840 goto cantfind_ext4;
2842 /* check blocks count against device size */
2843 blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
2844 if (blocks_count && ext4_blocks_count(es) > blocks_count) {
2845 ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
2846 "exceeds size of device (%llu blocks)",
2847 ext4_blocks_count(es), blocks_count);
2848 goto failed_mount;
2852 * It makes no sense for the first data block to be beyond the end
2853 * of the filesystem.
2855 if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
2856 ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
2857 "block %u is beyond end of filesystem (%llu)",
2858 le32_to_cpu(es->s_first_data_block),
2859 ext4_blocks_count(es));
2860 goto failed_mount;
2862 blocks_count = (ext4_blocks_count(es) -
2863 le32_to_cpu(es->s_first_data_block) +
2864 EXT4_BLOCKS_PER_GROUP(sb) - 1);
2865 do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
2866 if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
2867 ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
2868 "(block count %llu, first data block %u, "
2869 "blocks per group %lu)", sbi->s_groups_count,
2870 ext4_blocks_count(es),
2871 le32_to_cpu(es->s_first_data_block),
2872 EXT4_BLOCKS_PER_GROUP(sb));
2873 goto failed_mount;
2875 sbi->s_groups_count = blocks_count;
2876 sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
2877 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
2878 db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
2879 EXT4_DESC_PER_BLOCK(sb);
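/*
 * Worked example (hypothetical numbers; added commentary not present in
 * the original source): a 1 TiB filesystem with 4 KiB blocks has 2^28
 * blocks.  With the default 32768 blocks per group that is 8192 block
 * groups, and with 32-byte descriptors (128 per block) db_count comes
 * out as 8192 / 128 = 64 group-descriptor blocks to read below.
 */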
2880 sbi->s_group_desc = kmalloc(db_count * sizeof(struct buffer_head *),
2881 GFP_KERNEL);
2882 if (sbi->s_group_desc == NULL) {
2883 ext4_msg(sb, KERN_ERR, "not enough memory");
2884 goto failed_mount;
2887 #ifdef CONFIG_PROC_FS
2888 if (ext4_proc_root)
2889 sbi->s_proc = proc_mkdir(sb->s_id, ext4_proc_root);
2890 #endif
2892 bgl_lock_init(sbi->s_blockgroup_lock);
2894 for (i = 0; i < db_count; i++) {
2895 block = descriptor_loc(sb, logical_sb_block, i);
2896 sbi->s_group_desc[i] = sb_bread(sb, block);
2897 if (!sbi->s_group_desc[i]) {
2898 ext4_msg(sb, KERN_ERR,
2899 "can't read group descriptor %d", i);
2900 db_count = i;
2901 goto failed_mount2;
2904 if (!ext4_check_descriptors(sb)) {
2905 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
2906 goto failed_mount2;
2908 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
2909 if (!ext4_fill_flex_info(sb)) {
2910 ext4_msg(sb, KERN_ERR,
2911 "unable to initialize "
2912 "flex_bg meta info!");
2913 goto failed_mount2;
2916 sbi->s_gdb_count = db_count;
2917 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
2918 spin_lock_init(&sbi->s_next_gen_lock);
2920 sbi->s_stripe = ext4_get_stripe_size(sbi);
2921 sbi->s_max_writeback_mb_bump = 128;
2924 * set up enough so that it can read an inode
2926 if (!test_opt(sb, NOLOAD) &&
2927 EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
2928 sb->s_op = &ext4_sops;
2929 else
2930 sb->s_op = &ext4_nojournal_sops;
2931 sb->s_export_op = &ext4_export_ops;
2932 sb->s_xattr = ext4_xattr_handlers;
2933 #ifdef CONFIG_QUOTA
2934 sb->s_qcop = &ext4_qctl_operations;
2935 sb->dq_op = &ext4_quota_operations;
2936 #endif
2937 INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
2938 mutex_init(&sbi->s_orphan_lock);
2939 mutex_init(&sbi->s_resize_lock);
2941 sb->s_root = NULL;
2943 needs_recovery = (es->s_last_orphan != 0 ||
2944 EXT4_HAS_INCOMPAT_FEATURE(sb,
2945 EXT4_FEATURE_INCOMPAT_RECOVER));
2948 * The first inode we look at is the journal inode. Don't try
2949 * root first: it may be modified in the journal!
2951 if (!test_opt(sb, NOLOAD) &&
2952 EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
2953 if (ext4_load_journal(sb, es, journal_devnum))
2954 goto failed_mount3;
2955 } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
2956 EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
2957 ext4_msg(sb, KERN_ERR, "required journal recovery "
2958 "suppressed and not mounted read-only");
2959 goto failed_mount_wq;
2960 } else {
2961 clear_opt(sbi->s_mount_opt, DATA_FLAGS);
2962 set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
2963 sbi->s_journal = NULL;
2964 needs_recovery = 0;
2965 goto no_journal;
2968 if (ext4_blocks_count(es) > 0xffffffffULL &&
2969 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
2970 JBD2_FEATURE_INCOMPAT_64BIT)) {
2971 ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
2972 goto failed_mount_wq;
2975 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
2976 jbd2_journal_set_features(sbi->s_journal,
2977 JBD2_FEATURE_COMPAT_CHECKSUM, 0,
2978 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
2979 } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
2980 jbd2_journal_set_features(sbi->s_journal,
2981 JBD2_FEATURE_COMPAT_CHECKSUM, 0, 0);
2982 jbd2_journal_clear_features(sbi->s_journal, 0, 0,
2983 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
2984 } else {
2985 jbd2_journal_clear_features(sbi->s_journal,
2986 JBD2_FEATURE_COMPAT_CHECKSUM, 0,
2987 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
2990 /* We have now updated the journal if required, so we can
2991 * validate the data journaling mode. */
2992 switch (test_opt(sb, DATA_FLAGS)) {
2993 case 0:
2994 /* No mode set, assume a default based on the journal
2995 * capabilities: ORDERED_DATA if the journal can
2996 * cope, else JOURNAL_DATA
2998 if (jbd2_journal_check_available_features
2999 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
3000 set_opt(sbi->s_mount_opt, ORDERED_DATA);
3001 else
3002 set_opt(sbi->s_mount_opt, JOURNAL_DATA);
3003 break;
3005 case EXT4_MOUNT_ORDERED_DATA:
3006 case EXT4_MOUNT_WRITEBACK_DATA:
3007 if (!jbd2_journal_check_available_features
3008 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
3009 ext4_msg(sb, KERN_ERR, "Journal does not support "
3010 "requested data journaling mode");
3011 goto failed_mount_wq;
3013 default:
3014 break;
3016 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
3018 no_journal:
3019 err = percpu_counter_init(&sbi->s_freeblocks_counter,
3020 ext4_count_free_blocks(sb));
3021 if (!err)
3022 err = percpu_counter_init(&sbi->s_freeinodes_counter,
3023 ext4_count_free_inodes(sb));
3024 if (!err)
3025 err = percpu_counter_init(&sbi->s_dirs_counter,
3026 ext4_count_dirs(sb));
3027 if (!err)
3028 err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
3029 if (err) {
3030 ext4_msg(sb, KERN_ERR, "insufficient memory");
3031 goto failed_mount_wq;
3034 EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
3035 if (!EXT4_SB(sb)->dio_unwritten_wq) {
3036 printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
3037 goto failed_mount_wq;
3041 * The jbd2_journal_load will have done any necessary log recovery,
3042 * so we can safely mount the rest of the filesystem now.
3045 root = ext4_iget(sb, EXT4_ROOT_INO);
3046 if (IS_ERR(root)) {
3047 ext4_msg(sb, KERN_ERR, "get root inode failed");
3048 ret = PTR_ERR(root);
3049 goto failed_mount4;
3051 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
3052 iput(root);
3053 ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
3054 goto failed_mount4;
3056 sb->s_root = d_alloc_root(root);
3057 if (!sb->s_root) {
3058 ext4_msg(sb, KERN_ERR, "get root dentry failed");
3059 iput(root);
3060 ret = -ENOMEM;
3061 goto failed_mount4;
3064 ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY);
3066 /* determine the minimum size of new large inodes, if present */
3067 if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
3068 sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
3069 EXT4_GOOD_OLD_INODE_SIZE;
3070 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3071 EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) {
3072 if (sbi->s_want_extra_isize <
3073 le16_to_cpu(es->s_want_extra_isize))
3074 sbi->s_want_extra_isize =
3075 le16_to_cpu(es->s_want_extra_isize);
3076 if (sbi->s_want_extra_isize <
3077 le16_to_cpu(es->s_min_extra_isize))
3078 sbi->s_want_extra_isize =
3079 le16_to_cpu(es->s_min_extra_isize);
3082 /* Check if enough inode space is available */
3083 if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
3084 sbi->s_inode_size) {
3085 sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
3086 EXT4_GOOD_OLD_INODE_SIZE;
3087 ext4_msg(sb, KERN_INFO, "required extra inode space not "
3088 "available");
3091 if (test_opt(sb, DELALLOC) &&
3092 (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
3093 ext4_msg(sb, KERN_WARNING, "Ignoring delalloc option - "
3094 "requested data journaling mode");
3095 clear_opt(sbi->s_mount_opt, DELALLOC);
3097 if (test_opt(sb, DIOREAD_NOLOCK)) {
3098 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
3099 ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
3100 "option - requested data journaling mode");
3101 clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
3103 if (sb->s_blocksize < PAGE_SIZE) {
3104 ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
3105 "option - block size is too small");
3106 clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
3110 err = ext4_setup_system_zone(sb);
3111 if (err) {
3112 ext4_msg(sb, KERN_ERR, "failed to initialize system "
3113 "zone (%d)", err);
3114 goto failed_mount4;
3117 ext4_ext_init(sb);
3118 err = ext4_mb_init(sb, needs_recovery);
3119 if (err) {
3120 ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
3121 err);
3122 goto failed_mount4;
3125 sbi->s_kobj.kset = ext4_kset;
3126 init_completion(&sbi->s_kobj_unregister);
3127 err = kobject_init_and_add(&sbi->s_kobj, &ext4_ktype, NULL,
3128 "%s", sb->s_id);
3129 if (err) {
3130 ext4_mb_release(sb);
3131 ext4_ext_release(sb);
3132 goto failed_mount4;
3135 EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
3136 ext4_orphan_cleanup(sb, es);
3137 EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
3138 if (needs_recovery) {
3139 ext4_msg(sb, KERN_INFO, "recovery complete");
3140 ext4_mark_recovery_complete(sb, es);
3142 if (EXT4_SB(sb)->s_journal) {
3143 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
3144 descr = " journalled data mode";
3145 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
3146 descr = " ordered data mode";
3147 else
3148 descr = " writeback data mode";
3149 } else
3150 descr = "out journal";
3152 ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
3153 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
3154 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
3156 init_timer(&sbi->s_err_report);
3157 sbi->s_err_report.function = print_daily_error_info;
3158 sbi->s_err_report.data = (unsigned long) sb;
3159 if (es->s_error_count)
3160 mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
3162 kfree(orig_data);
3163 return 0;
3165 cantfind_ext4:
3166 if (!silent)
3167 ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
3168 goto failed_mount;
3170 failed_mount4:
3171 ext4_msg(sb, KERN_ERR, "mount failed");
3172 destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
3173 failed_mount_wq:
3174 ext4_release_system_zone(sb);
3175 if (sbi->s_journal) {
3176 jbd2_journal_destroy(sbi->s_journal);
3177 sbi->s_journal = NULL;
3179 percpu_counter_destroy(&sbi->s_freeblocks_counter);
3180 percpu_counter_destroy(&sbi->s_freeinodes_counter);
3181 percpu_counter_destroy(&sbi->s_dirs_counter);
3182 percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
3183 failed_mount3:
3184 if (sbi->s_flex_groups) {
3185 if (is_vmalloc_addr(sbi->s_flex_groups))
3186 vfree(sbi->s_flex_groups);
3187 else
3188 kfree(sbi->s_flex_groups);
3190 failed_mount2:
3191 for (i = 0; i < db_count; i++)
3192 brelse(sbi->s_group_desc[i]);
3193 kfree(sbi->s_group_desc);
3194 failed_mount:
3195 if (sbi->s_proc) {
3196 remove_proc_entry(sb->s_id, ext4_proc_root);
3198 #ifdef CONFIG_QUOTA
3199 for (i = 0; i < MAXQUOTAS; i++)
3200 kfree(sbi->s_qf_names[i]);
3201 #endif
3202 ext4_blkdev_remove(sbi);
3203 brelse(bh);
3204 out_fail:
3205 sb->s_fs_info = NULL;
3206 kfree(sbi->s_blockgroup_lock);
3207 kfree(sbi);
3208 out_free_orig:
3209 kfree(orig_data);
3210 return ret;
3214 * Setup any per-fs journal parameters now. We'll do this both on
3215 * initial mount, once the journal has been initialised but before we've
3216 * done any recovery; and again on any subsequent remount.
3218 static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
3220 struct ext4_sb_info *sbi = EXT4_SB(sb);
3222 journal->j_commit_interval = sbi->s_commit_interval;
3223 journal->j_min_batch_time = sbi->s_min_batch_time;
3224 journal->j_max_batch_time = sbi->s_max_batch_time;
3226 write_lock(&journal->j_state_lock);
3227 if (test_opt(sb, BARRIER))
3228 journal->j_flags |= JBD2_BARRIER;
3229 else
3230 journal->j_flags &= ~JBD2_BARRIER;
3231 if (test_opt(sb, DATA_ERR_ABORT))
3232 journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
3233 else
3234 journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
3235 write_unlock(&journal->j_state_lock);
3238 static journal_t *ext4_get_journal(struct super_block *sb,
3239 unsigned int journal_inum)
3241 struct inode *journal_inode;
3242 journal_t *journal;
3244 BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
3246 /* First, test for the existence of a valid inode on disk. Bad
3247 * things happen if we iget() an unused inode, as the subsequent
3248 * iput() will try to delete it. */
3250 journal_inode = ext4_iget(sb, journal_inum);
3251 if (IS_ERR(journal_inode)) {
3252 ext4_msg(sb, KERN_ERR, "no journal found");
3253 return NULL;
3255 if (!journal_inode->i_nlink) {
3256 make_bad_inode(journal_inode);
3257 iput(journal_inode);
3258 ext4_msg(sb, KERN_ERR, "journal inode is deleted");
3259 return NULL;
3262 jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
3263 journal_inode, journal_inode->i_size);
3264 if (!S_ISREG(journal_inode->i_mode)) {
3265 ext4_msg(sb, KERN_ERR, "invalid journal inode");
3266 iput(journal_inode);
3267 return NULL;
3270 journal = jbd2_journal_init_inode(journal_inode);
3271 if (!journal) {
3272 ext4_msg(sb, KERN_ERR, "Could not load journal inode");
3273 iput(journal_inode);
3274 return NULL;
3276 journal->j_private = sb;
3277 ext4_init_journal_params(sb, journal);
3278 return journal;
3281 static journal_t *ext4_get_dev_journal(struct super_block *sb,
3282 dev_t j_dev)
3284 struct buffer_head *bh;
3285 journal_t *journal;
3286 ext4_fsblk_t start;
3287 ext4_fsblk_t len;
3288 int hblock, blocksize;
3289 ext4_fsblk_t sb_block;
3290 unsigned long offset;
3291 struct ext4_super_block *es;
3292 struct block_device *bdev;
3294 BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
3296 bdev = ext4_blkdev_get(j_dev, sb);
3297 if (bdev == NULL)
3298 return NULL;
3300 if (bd_claim(bdev, sb)) {
3301 ext4_msg(sb, KERN_ERR,
3302 "failed to claim external journal device");
3303 blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
3304 return NULL;
3307 blocksize = sb->s_blocksize;
3308 hblock = bdev_logical_block_size(bdev);
3309 if (blocksize < hblock) {
3310 ext4_msg(sb, KERN_ERR,
3311 "blocksize too small for journal device");
3312 goto out_bdev;
3315 sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
3316 offset = EXT4_MIN_BLOCK_SIZE % blocksize;
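/*
 * Descriptive note (added commentary, not in the original source): the
 * external journal device keeps its ext4-style superblock at byte 1024
 * as well, so with a 4 KiB block size sb_block is 0 and offset is 1024,
 * mirroring the primary superblock lookup in ext4_fill_super().
 */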
3317 set_blocksize(bdev, blocksize);
3318 if (!(bh = __bread(bdev, sb_block, blocksize))) {
3319 ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
3320 "external journal");
3321 goto out_bdev;
3324 es = (struct ext4_super_block *) (((char *)bh->b_data) + offset);
3325 if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
3326 !(le32_to_cpu(es->s_feature_incompat) &
3327 EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
3328 ext4_msg(sb, KERN_ERR, "external journal has "
3329 "bad superblock");
3330 brelse(bh);
3331 goto out_bdev;
3334 if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
3335 ext4_msg(sb, KERN_ERR, "journal UUID does not match");
3336 brelse(bh);
3337 goto out_bdev;
3340 len = ext4_blocks_count(es);
3341 start = sb_block + 1;
3342 brelse(bh); /* we're done with the superblock */
3344 journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
3345 start, len, blocksize);
3346 if (!journal) {
3347 ext4_msg(sb, KERN_ERR, "failed to create device journal");
3348 goto out_bdev;
3350 journal->j_private = sb;
3351 ll_rw_block(READ, 1, &journal->j_sb_buffer);
3352 wait_on_buffer(journal->j_sb_buffer);
3353 if (!buffer_uptodate(journal->j_sb_buffer)) {
3354 ext4_msg(sb, KERN_ERR, "I/O error on journal device");
3355 goto out_journal;
3357 if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
3358 ext4_msg(sb, KERN_ERR, "External journal has more than one "
3359 "user (unsupported) - %d",
3360 be32_to_cpu(journal->j_superblock->s_nr_users));
3361 goto out_journal;
3363 EXT4_SB(sb)->journal_bdev = bdev;
3364 ext4_init_journal_params(sb, journal);
3365 return journal;
3367 out_journal:
3368 jbd2_journal_destroy(journal);
3369 out_bdev:
3370 ext4_blkdev_put(bdev);
3371 return NULL;
3374 static int ext4_load_journal(struct super_block *sb,
3375 struct ext4_super_block *es,
3376 unsigned long journal_devnum)
3378 journal_t *journal;
3379 unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
3380 dev_t journal_dev;
3381 int err = 0;
3382 int really_read_only;
3384 BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
3386 if (journal_devnum &&
3387 journal_devnum != le32_to_cpu(es->s_journal_dev)) {
3388 ext4_msg(sb, KERN_INFO, "external journal device major/minor "
3389 "numbers have changed");
3390 journal_dev = new_decode_dev(journal_devnum);
3391 } else
3392 journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
3394 really_read_only = bdev_read_only(sb->s_bdev);
3397 * Are we loading a blank journal or performing recovery after a
3398 * crash? For recovery, we need to check in advance whether we
3399 * can get read-write access to the device.
3401 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
3402 if (sb->s_flags & MS_RDONLY) {
3403 ext4_msg(sb, KERN_INFO, "INFO: recovery "
3404 "required on readonly filesystem");
3405 if (really_read_only) {
3406 ext4_msg(sb, KERN_ERR, "write access "
3407 "unavailable, cannot proceed");
3408 return -EROFS;
3410 ext4_msg(sb, KERN_INFO, "write access will "
3411 "be enabled during recovery");
3415 if (journal_inum && journal_dev) {
3416 ext4_msg(sb, KERN_ERR, "filesystem has both journal "
3417 "and inode journals!");
3418 return -EINVAL;
3421 if (journal_inum) {
3422 if (!(journal = ext4_get_journal(sb, journal_inum)))
3423 return -EINVAL;
3424 } else {
3425 if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
3426 return -EINVAL;
3429 if (!(journal->j_flags & JBD2_BARRIER))
3430 ext4_msg(sb, KERN_INFO, "barriers disabled");
3432 if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
3433 err = jbd2_journal_update_format(journal);
3434 if (err) {
3435 ext4_msg(sb, KERN_ERR, "error updating journal");
3436 jbd2_journal_destroy(journal);
3437 return err;
3441 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER))
3442 err = jbd2_journal_wipe(journal, !really_read_only);
3443 if (!err) {
3444 char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
3445 if (save)
3446 memcpy(save, ((char *) es) +
3447 EXT4_S_ERR_START, EXT4_S_ERR_LEN);
3448 err = jbd2_journal_load(journal);
3449 if (save)
3450 memcpy(((char *) es) + EXT4_S_ERR_START,
3451 save, EXT4_S_ERR_LEN);
3452 kfree(save);
3455 if (err) {
3456 ext4_msg(sb, KERN_ERR, "error loading journal");
3457 jbd2_journal_destroy(journal);
3458 return err;
3461 EXT4_SB(sb)->s_journal = journal;
3462 ext4_clear_journal_err(sb, es);
3464 if (journal_devnum &&
3465 journal_devnum != le32_to_cpu(es->s_journal_dev)) {
3466 es->s_journal_dev = cpu_to_le32(journal_devnum);
3468 /* Make sure we flush the recovery flag to disk. */
3469 ext4_commit_super(sb, 1);
3472 return 0;
3475 static int ext4_commit_super(struct super_block *sb, int sync)
3477 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
3478 struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
3479 int error = 0;
3481 if (!sbh)
3482 return error;
3483 if (buffer_write_io_error(sbh)) {
3485 * Oh, dear. A previous attempt to write the
3486 * superblock failed. This could happen because the
3487 * USB device was yanked out. Or it could happen to
3488 * be a transient write error and maybe the block will
3489 * be remapped. Nothing we can do but to retry the
3490 * write and hope for the best.
3492 ext4_msg(sb, KERN_ERR, "previous I/O error to "
3493 "superblock detected");
3494 clear_buffer_write_io_error(sbh);
3495 set_buffer_uptodate(sbh);
3498 * If the file system is mounted read-only, don't update the
3499 * superblock write time. This avoids updating the superblock
3500 * write time when we are mounting the root file system
3501 * read/only but we need to replay the journal; at that point,
3502 * for people who are east of GMT and who make their clock
3503 * tick in localtime for Windows bug-for-bug compatibility,
3504 * the clock is set in the future, and this will cause e2fsck
3505 * to complain and force a full file system check.
3507 if (!(sb->s_flags & MS_RDONLY))
3508 es->s_wtime = cpu_to_le32(get_seconds());
3509 if (sb->s_bdev->bd_part)
3510 es->s_kbytes_written =
3511 cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
3512 ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
3513 EXT4_SB(sb)->s_sectors_written_start) >> 1));
3514 else
3515 es->s_kbytes_written =
3516 cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
3517 ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
3518 &EXT4_SB(sb)->s_freeblocks_counter));
3519 es->s_free_inodes_count = cpu_to_le32(percpu_counter_sum_positive(
3520 &EXT4_SB(sb)->s_freeinodes_counter));
3521 sb->s_dirt = 0;
3522 BUFFER_TRACE(sbh, "marking dirty");
3523 mark_buffer_dirty(sbh);
3524 if (sync) {
3525 error = sync_dirty_buffer(sbh);
3526 if (error)
3527 return error;
3529 error = buffer_write_io_error(sbh);
3530 if (error) {
3531 ext4_msg(sb, KERN_ERR, "I/O error while writing "
3532 "superblock");
3533 clear_buffer_write_io_error(sbh);
3534 set_buffer_uptodate(sbh);
3537 return error;
3541 * Have we just finished recovery? If so, and if we are mounting (or
3542 * remounting) the filesystem readonly, then we will end up with a
3543 * consistent fs on disk. Record that fact.
3545 static void ext4_mark_recovery_complete(struct super_block *sb,
3546 struct ext4_super_block *es)
3548 journal_t *journal = EXT4_SB(sb)->s_journal;
3550 if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
3551 BUG_ON(journal != NULL);
3552 return;
3554 jbd2_journal_lock_updates(journal);
3555 if (jbd2_journal_flush(journal) < 0)
3556 goto out;
3558 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER) &&
3559 sb->s_flags & MS_RDONLY) {
3560 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
3561 ext4_commit_super(sb, 1);
3564 out:
3565 jbd2_journal_unlock_updates(journal);
3569 * If we are mounting (or read-write remounting) a filesystem whose journal
3570 * has recorded an error from a previous lifetime, move that error to the
3571 * main filesystem now.
3573 static void ext4_clear_journal_err(struct super_block *sb,
3574 struct ext4_super_block *es)
3576 journal_t *journal;
3577 int j_errno;
3578 const char *errstr;
3580 BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
3582 journal = EXT4_SB(sb)->s_journal;
3585 * Now check for any error status which may have been recorded in the
3586 * journal by a prior ext4_error() or ext4_abort()
3589 j_errno = jbd2_journal_errno(journal);
3590 if (j_errno) {
3591 char nbuf[16];
3593 errstr = ext4_decode_error(sb, j_errno, nbuf);
3594 ext4_warning(sb, "Filesystem error recorded "
3595 "from previous mount: %s", errstr);
3596 ext4_warning(sb, "Marking fs in need of filesystem check.");
3598 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
3599 es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
3600 ext4_commit_super(sb, 1);
3602 jbd2_journal_clear_err(journal);
3607 * Force the running and committing transactions to commit,
3608 * and wait on the commit.
3610 int ext4_force_commit(struct super_block *sb)
3612 journal_t *journal;
3613 int ret = 0;
3615 if (sb->s_flags & MS_RDONLY)
3616 return 0;
3618 journal = EXT4_SB(sb)->s_journal;
3619 if (journal) {
3620 vfs_check_frozen(sb, SB_FREEZE_TRANS);
3621 ret = ext4_journal_force_commit(journal);
3624 return ret;
3627 static void ext4_write_super(struct super_block *sb)
3629 lock_super(sb);
3630 ext4_commit_super(sb, 1);
3631 unlock_super(sb);
3634 static int ext4_sync_fs(struct super_block *sb, int wait)
3636 int ret = 0;
3637 tid_t target;
3638 struct ext4_sb_info *sbi = EXT4_SB(sb);
3640 trace_ext4_sync_fs(sb, wait);
3641 flush_workqueue(sbi->dio_unwritten_wq);
3642 if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
3643 if (wait)
3644 jbd2_log_wait_commit(sbi->s_journal, target);
3646 return ret;
3650 * LVM calls this function before a (read-only) snapshot is created. This
3651 * gives us a chance to flush the journal completely and mark the fs clean.
3653 static int ext4_freeze(struct super_block *sb)
3655 int error = 0;
3656 journal_t *journal;
3658 if (sb->s_flags & MS_RDONLY)
3659 return 0;
3661 journal = EXT4_SB(sb)->s_journal;
3663 /* Now we set up the journal barrier. */
3664 jbd2_journal_lock_updates(journal);
3667 * Don't clear the needs_recovery flag if we failed to flush
3668 * the journal.
3670 error = jbd2_journal_flush(journal);
3671 if (error < 0)
3672 goto out;
3674 /* Journal blocked and flushed, clear needs_recovery flag. */
3675 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
3676 error = ext4_commit_super(sb, 1);
3677 out:
3678 /* we rely on s_frozen to stop further updates */
3679 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
3680 return error;
3684 * Called by LVM after the snapshot is done. We need to reset the RECOVER
3685 * flag here, even though the filesystem is not technically dirty yet.
3687 static int ext4_unfreeze(struct super_block *sb)
3689 if (sb->s_flags & MS_RDONLY)
3690 return 0;
3692 lock_super(sb);
3693 /* Reset the needs_recovery flag before the fs is unlocked. */
3694 EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
3695 ext4_commit_super(sb, 1);
3696 unlock_super(sb);
3697 return 0;
3700 static int ext4_remount(struct super_block *sb, int *flags, char *data)
3702 struct ext4_super_block *es;
3703 struct ext4_sb_info *sbi = EXT4_SB(sb);
3704 ext4_fsblk_t n_blocks_count = 0;
3705 unsigned long old_sb_flags;
3706 struct ext4_mount_options old_opts;
3707 int enable_quota = 0;
3708 ext4_group_t g;
3709 unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
3710 int err;
3711 #ifdef CONFIG_QUOTA
3712 int i;
3713 #endif
3714 char *orig_data = kstrdup(data, GFP_KERNEL);
3716 /* Store the original options */
3717 lock_super(sb);
3718 old_sb_flags = sb->s_flags;
3719 old_opts.s_mount_opt = sbi->s_mount_opt;
3720 old_opts.s_resuid = sbi->s_resuid;
3721 old_opts.s_resgid = sbi->s_resgid;
3722 old_opts.s_commit_interval = sbi->s_commit_interval;
3723 old_opts.s_min_batch_time = sbi->s_min_batch_time;
3724 old_opts.s_max_batch_time = sbi->s_max_batch_time;
3725 #ifdef CONFIG_QUOTA
3726 old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
3727 for (i = 0; i < MAXQUOTAS; i++)
3728 old_opts.s_qf_names[i] = sbi->s_qf_names[i];
3729 #endif
3730 if (sbi->s_journal && sbi->s_journal->j_task->io_context)
3731 journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
3733 /*
3734 * Allow the "check" option to be passed as a remount option.
3735 */
3736 if (!parse_options(data, sb, NULL, &journal_ioprio,
3737 &n_blocks_count, 1)) {
3738 err = -EINVAL;
3739 goto restore_opts;
3742 if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
3743 ext4_abort(sb, "Abort forced by user");
3745 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
3746 (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
3748 es = sbi->s_es;
3750 if (sbi->s_journal) {
3751 ext4_init_journal_params(sb, sbi->s_journal);
3752 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
3755 if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) ||
3756 n_blocks_count > ext4_blocks_count(es)) {
3757 if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
3758 err = -EROFS;
3759 goto restore_opts;
3762 if (*flags & MS_RDONLY) {
3763 err = dquot_suspend(sb, -1);
3764 if (err < 0)
3765 goto restore_opts;
3767 /*
3768 * First of all, the unconditional stuff we have to do
3769 * to disable replay of the journal when we next remount
3770 */
3771 sb->s_flags |= MS_RDONLY;
3773 /*
3774 * OK, test if we are remounting a valid rw partition
3775 * readonly, and if so set the rdonly flag and then
3776 * mark the partition as valid again.
3777 */
3778 if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
3779 (sbi->s_mount_state & EXT4_VALID_FS))
3780 es->s_state = cpu_to_le16(sbi->s_mount_state);
3782 if (sbi->s_journal)
3783 ext4_mark_recovery_complete(sb, es);
3784 } else {
3785 /* Make sure we can mount this feature set readwrite */
3786 if (!ext4_feature_set_ok(sb, 0)) {
3787 err = -EROFS;
3788 goto restore_opts;
3789 }
3790 /*
3791 * Make sure the group descriptor checksums
3792 * are sane. If they aren't, refuse to remount r/w.
3793 */
3794 for (g = 0; g < sbi->s_groups_count; g++) {
3795 struct ext4_group_desc *gdp =
3796 ext4_get_group_desc(sb, g, NULL);
3798 if (!ext4_group_desc_csum_verify(sbi, g, gdp)) {
3799 ext4_msg(sb, KERN_ERR,
3800 "ext4_remount: Checksum for group %u failed (%u!=%u)",
3801 g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)),
3802 le16_to_cpu(gdp->bg_checksum));
3803 err = -EINVAL;
3804 goto restore_opts;
3805 }
3806 }
3808 /*
3809 * If we have an unprocessed orphan list hanging
3810 * around from a previously readonly bdev mount,
3811 * require a full umount/remount for now.
3812 */
3813 if (es->s_last_orphan) {
3814 ext4_msg(sb, KERN_WARNING, "Couldn't "
3815 "remount RDWR because of unprocessed "
3816 "orphan inode list. Please "
3817 "umount/remount instead");
3818 err = -EINVAL;
3819 goto restore_opts;
3820 }
3822 /*
3823 * Mounting a RDONLY partition read-write, so reread
3824 * and store the current valid flag. (It may have
3825 * been changed by e2fsck since we originally mounted
3826 * the partition.)
3827 */
3828 if (sbi->s_journal)
3829 ext4_clear_journal_err(sb, es);
3830 sbi->s_mount_state = le16_to_cpu(es->s_state);
3831 if ((err = ext4_group_extend(sb, es, n_blocks_count)))
3832 goto restore_opts;
3833 if (!ext4_setup_super(sb, es, 0))
3834 sb->s_flags &= ~MS_RDONLY;
3835 enable_quota = 1;
3838 ext4_setup_system_zone(sb);
3839 if (sbi->s_journal == NULL)
3840 ext4_commit_super(sb, 1);
3842 #ifdef CONFIG_QUOTA
3843 /* Release old quota file names */
3844 for (i = 0; i < MAXQUOTAS; i++)
3845 if (old_opts.s_qf_names[i] &&
3846 old_opts.s_qf_names[i] != sbi->s_qf_names[i])
3847 kfree(old_opts.s_qf_names[i]);
3848 #endif
3849 unlock_super(sb);
3850 if (enable_quota)
3851 dquot_resume(sb, -1);
3853 ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
3854 kfree(orig_data);
3855 return 0;
3857 restore_opts:
3858 sb->s_flags = old_sb_flags;
3859 sbi->s_mount_opt = old_opts.s_mount_opt;
3860 sbi->s_resuid = old_opts.s_resuid;
3861 sbi->s_resgid = old_opts.s_resgid;
3862 sbi->s_commit_interval = old_opts.s_commit_interval;
3863 sbi->s_min_batch_time = old_opts.s_min_batch_time;
3864 sbi->s_max_batch_time = old_opts.s_max_batch_time;
3865 #ifdef CONFIG_QUOTA
3866 sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
3867 for (i = 0; i < MAXQUOTAS; i++) {
3868 if (sbi->s_qf_names[i] &&
3869 old_opts.s_qf_names[i] != sbi->s_qf_names[i])
3870 kfree(sbi->s_qf_names[i]);
3871 sbi->s_qf_names[i] = old_opts.s_qf_names[i];
3872 }
3873 #endif
3874 unlock_super(sb);
3875 kfree(orig_data);
3876 return err;
3877 }
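/*
 * Fill in struct kstatfs. Unless the MINIX_DF mount option is set, the
 * metadata overhead subtracted from the block count is
 *
 *   overhead = s_first_data_block
 *            + sum over all groups of (ext4_bg_has_super(g) + ext4_bg_num_gdb(g))
 *            + ngroups * (2 + s_itb_per_group)
 *
 * and is cached in s_overhead_last until the block count changes.
 */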
3879 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
3881 struct super_block *sb = dentry->d_sb;
3882 struct ext4_sb_info *sbi = EXT4_SB(sb);
3883 struct ext4_super_block *es = sbi->s_es;
3884 u64 fsid;
3886 if (test_opt(sb, MINIX_DF)) {
3887 sbi->s_overhead_last = 0;
3888 } else if (sbi->s_blocks_last != ext4_blocks_count(es)) {
3889 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
3890 ext4_fsblk_t overhead = 0;
3892 /*
3893 * Compute the overhead (FS structures). This is constant
3894 * for a given filesystem unless the number of block groups
3895 * changes so we cache the previous value until it does.
3896 */
3898 /*
3899 * All of the blocks before first_data_block are
3900 * overhead
3901 */
3902 overhead = le32_to_cpu(es->s_first_data_block);
3904 /*
3905 * Add the overhead attributed to the superblock and
3906 * block group descriptors. If the sparse superblocks
3907 * feature is turned on, then not all groups have this.
3908 */
3909 for (i = 0; i < ngroups; i++) {
3910 overhead += ext4_bg_has_super(sb, i) +
3911 ext4_bg_num_gdb(sb, i);
3912 cond_resched();
3913 }
3915 /*
3916 * Every block group has an inode bitmap, a block
3917 * bitmap, and an inode table.
3918 */
3919 overhead += ngroups * (2 + sbi->s_itb_per_group);
3920 sbi->s_overhead_last = overhead;
3921 smp_wmb();
3922 sbi->s_blocks_last = ext4_blocks_count(es);
3923 }
3925 buf->f_type = EXT4_SUPER_MAGIC;
3926 buf->f_bsize = sb->s_blocksize;
3927 buf->f_blocks = ext4_blocks_count(es) - sbi->s_overhead_last;
3928 buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter) -
3929 percpu_counter_sum_positive(&sbi->s_dirtyblocks_counter);
3930 buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es);
3931 if (buf->f_bfree < ext4_r_blocks_count(es))
3932 buf->f_bavail = 0;
3933 buf->f_files = le32_to_cpu(es->s_inodes_count);
3934 buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
3935 buf->f_namelen = EXT4_NAME_LEN;
3936 fsid = le64_to_cpup((void *)es->s_uuid) ^
3937 le64_to_cpup((void *)es->s_uuid + sizeof(u64));
3938 buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
3939 buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
3941 return 0;
3944 /* Helper function for writing quotas on sync - we need to start transaction
3945 * before quota file is locked for write. Otherwise there are possible deadlocks:
3946 * Process 1 Process 2
3947 * ext4_create() quota_sync()
3948 * jbd2_journal_start() write_dquot()
3949 * dquot_initialize() down(dqio_mutex)
3950 * down(dqio_mutex) jbd2_journal_start()
3952 */
3954 #ifdef CONFIG_QUOTA
3956 static inline struct inode *dquot_to_inode(struct dquot *dquot)
3958 return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
3959 }
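/*
 * The dquot helpers below wrap the generic quota operations in a jbd2
 * handle (sized with the EXT4_QUOTA_*_BLOCKS estimates) so that quota
 * file updates are journalled along with the rest of the metadata.
 */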
3961 static int ext4_write_dquot(struct dquot *dquot)
3963 int ret, err;
3964 handle_t *handle;
3965 struct inode *inode;
3967 inode = dquot_to_inode(dquot);
3968 handle = ext4_journal_start(inode,
3969 EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
3970 if (IS_ERR(handle))
3971 return PTR_ERR(handle);
3972 ret = dquot_commit(dquot);
3973 err = ext4_journal_stop(handle);
3974 if (!ret)
3975 ret = err;
3976 return ret;
3979 static int ext4_acquire_dquot(struct dquot *dquot)
3981 int ret, err;
3982 handle_t *handle;
3984 handle = ext4_journal_start(dquot_to_inode(dquot),
3985 EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
3986 if (IS_ERR(handle))
3987 return PTR_ERR(handle);
3988 ret = dquot_acquire(dquot);
3989 err = ext4_journal_stop(handle);
3990 if (!ret)
3991 ret = err;
3992 return ret;
3995 static int ext4_release_dquot(struct dquot *dquot)
3997 int ret, err;
3998 handle_t *handle;
4000 handle = ext4_journal_start(dquot_to_inode(dquot),
4001 EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
4002 if (IS_ERR(handle)) {
4003 /* Release dquot anyway to avoid endless cycle in dqput() */
4004 dquot_release(dquot);
4005 return PTR_ERR(handle);
4007 ret = dquot_release(dquot);
4008 err = ext4_journal_stop(handle);
4009 if (!ret)
4010 ret = err;
4011 return ret;
4014 static int ext4_mark_dquot_dirty(struct dquot *dquot)
4016 /* Are we journaling quotas? */
4017 if (EXT4_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
4018 EXT4_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
4019 dquot_mark_dquot_dirty(dquot);
4020 return ext4_write_dquot(dquot);
4021 } else {
4022 return dquot_mark_dquot_dirty(dquot);
4023 }
4024 }
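/*
 * Write the quota file format information inside a small transaction
 * (one data block plus the inode block).
 */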
4026 static int ext4_write_info(struct super_block *sb, int type)
4028 int ret, err;
4029 handle_t *handle;
4031 /* Data block + inode block */
4032 handle = ext4_journal_start(sb->s_root->d_inode, 2);
4033 if (IS_ERR(handle))
4034 return PTR_ERR(handle);
4035 ret = dquot_commit_info(sb, type);
4036 err = ext4_journal_stop(handle);
4037 if (!ret)
4038 ret = err;
4039 return ret;
4040 }
4042 /*
4043 * Turn on quotas during mount time - we need to find
4044 * the quota file and such...
4045 */
4046 static int ext4_quota_on_mount(struct super_block *sb, int type)
4047 {
4048 return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
4049 EXT4_SB(sb)->s_jquota_fmt, type);
4050 }
4052 /*
4053 * Standard function to be called on quota_on
4054 */
4055 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
4056 char *name)
4057 {
4058 int err;
4059 struct path path;
4061 if (!test_opt(sb, QUOTA))
4062 return -EINVAL;
4064 err = kern_path(name, LOOKUP_FOLLOW, &path);
4065 if (err)
4066 return err;
4068 /* Quotafile not on the same filesystem? */
4069 if (path.mnt->mnt_sb != sb) {
4070 path_put(&path);
4071 return -EXDEV;
4073 /* Journaling quota? */
4074 if (EXT4_SB(sb)->s_qf_names[type]) {
4075 /* Quotafile not in fs root? */
4076 if (path.dentry->d_parent != sb->s_root)
4077 ext4_msg(sb, KERN_WARNING,
4078 "Quota file not on filesystem root. "
4079 "Journaled quota will not work");
4083 * When we journal data on quota file, we have to flush journal to see
4084 * all updates to the file when we bypass pagecache...
4086 if (EXT4_SB(sb)->s_journal &&
4087 ext4_should_journal_data(path.dentry->d_inode)) {
4088 /*
4089 * We don't need to lock updates but journal_flush() could
4090 * otherwise be livelocked...
4091 */
4092 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
4093 err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
4094 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
4095 if (err) {
4096 path_put(&path);
4097 return err;
4101 err = dquot_quota_on_path(sb, type, format_id, &path);
4102 path_put(&path);
4103 return err;
4104 }
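/*
 * Turn quotas off. With delayed allocation the quota file may still
 * have unallocated blocks, so sync the filesystem first to force them
 * to be allocated before handing off to dquot_quota_off().
 */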
4106 static int ext4_quota_off(struct super_block *sb, int type)
4108 /* Force all delayed allocation blocks to be allocated */
4109 if (test_opt(sb, DELALLOC)) {
4110 down_read(&sb->s_umount);
4111 sync_filesystem(sb);
4112 up_read(&sb->s_umount);
4115 return dquot_quota_off(sb, type);
4118 /* Read data from quotafile - avoid pagecache and such because we cannot afford
4119 * acquiring the locks... As quota files are never truncated and quota code
4120 * itself serializes the operations (and no one else should touch the files)
4121 * we don't have to be afraid of races */
4122 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
4123 size_t len, loff_t off)
4125 struct inode *inode = sb_dqopt(sb)->files[type];
4126 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
4127 int err = 0;
4128 int offset = off & (sb->s_blocksize - 1);
4129 int tocopy;
4130 size_t toread;
4131 struct buffer_head *bh;
4132 loff_t i_size = i_size_read(inode);
4134 if (off > i_size)
4135 return 0;
4136 if (off+len > i_size)
4137 len = i_size-off;
4138 toread = len;
4139 while (toread > 0) {
4140 tocopy = sb->s_blocksize - offset < toread ?
4141 sb->s_blocksize - offset : toread;
4142 bh = ext4_bread(NULL, inode, blk, 0, &err);
4143 if (err)
4144 return err;
4145 if (!bh) /* A hole? */
4146 memset(data, 0, tocopy);
4147 else
4148 memcpy(data, bh->b_data+offset, tocopy);
4149 brelse(bh);
4150 offset = 0;
4151 toread -= tocopy;
4152 data += tocopy;
4153 blk++;
4155 return len;
4158 /* Write to quotafile (we know the transaction is already started and has
4159 * enough credits) */
4160 static ssize_t ext4_quota_write(struct super_block *sb, int type,
4161 const char *data, size_t len, loff_t off)
4163 struct inode *inode = sb_dqopt(sb)->files[type];
4164 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
4165 int err = 0;
4166 int offset = off & (sb->s_blocksize - 1);
4167 struct buffer_head *bh;
4168 handle_t *handle = journal_current_handle();
4170 if (EXT4_SB(sb)->s_journal && !handle) {
4171 ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
4172 " cancelled because transaction is not started",
4173 (unsigned long long)off, (unsigned long long)len);
4174 return -EIO;
4175 }
4176 /*
4177 * Since we account for only one data block in transaction credits,
4178 * it is impossible to cross a block boundary.
4179 */
4180 if (sb->s_blocksize - offset < len) {
4181 ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
4182 " cancelled because not block aligned",
4183 (unsigned long long)off, (unsigned long long)len);
4184 return -EIO;
4187 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
4188 bh = ext4_bread(handle, inode, blk, 1, &err);
4189 if (!bh)
4190 goto out;
4191 err = ext4_journal_get_write_access(handle, bh);
4192 if (err) {
4193 brelse(bh);
4194 goto out;
4196 lock_buffer(bh);
4197 memcpy(bh->b_data+offset, data, len);
4198 flush_dcache_page(bh->b_page);
4199 unlock_buffer(bh);
4200 err = ext4_handle_dirty_metadata(handle, NULL, bh);
4201 brelse(bh);
4202 out:
4203 if (err) {
4204 mutex_unlock(&inode->i_mutex);
4205 return err;
4207 if (inode->i_size < off + len) {
4208 i_size_write(inode, off + len);
4209 EXT4_I(inode)->i_disksize = inode->i_size;
4211 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
4212 ext4_mark_inode_dirty(handle, inode);
4213 mutex_unlock(&inode->i_mutex);
4214 return len;
4215 }
4217 #endif
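/*
 * ->get_sb callback for the file_system_type entries below: mount a
 * block device with ext4_fill_super().
 */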
4219 static int ext4_get_sb(struct file_system_type *fs_type, int flags,
4220 const char *dev_name, void *data, struct vfsmount *mnt)
4222 return get_sb_bdev(fs_type, flags, dev_name, data, ext4_fill_super, mnt);
4223 }
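/*
 * When CONFIG_EXT4_USE_FOR_EXT23 is set and the real ext2/ext3 drivers
 * are not built, register ext4 under those names as well so existing
 * ext2/ext3 filesystems can still be mounted.
 */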
4225 #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
4226 static struct file_system_type ext2_fs_type = {
4227 .owner = THIS_MODULE,
4228 .name = "ext2",
4229 .get_sb = ext4_get_sb,
4230 .kill_sb = kill_block_super,
4231 .fs_flags = FS_REQUIRES_DEV,
4232 };
4234 static inline void register_as_ext2(void)
4235 {
4236 int err = register_filesystem(&ext2_fs_type);
4237 if (err)
4238 printk(KERN_WARNING
4239 "EXT4-fs: Unable to register as ext2 (%d)\n", err);
4242 static inline void unregister_as_ext2(void)
4244 unregister_filesystem(&ext2_fs_type);
4246 MODULE_ALIAS("ext2");
4247 #else
4248 static inline void register_as_ext2(void) { }
4249 static inline void unregister_as_ext2(void) { }
4250 #endif
4252 #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
4253 static inline void register_as_ext3(void)
4255 int err = register_filesystem(&ext3_fs_type);
4256 if (err)
4257 printk(KERN_WARNING
4258 "EXT4-fs: Unable to register as ext3 (%d)\n", err);
4261 static inline void unregister_as_ext3(void)
4263 unregister_filesystem(&ext3_fs_type);
4265 MODULE_ALIAS("ext3");
4266 #else
4267 static inline void register_as_ext3(void) { }
4268 static inline void unregister_as_ext3(void) { }
4269 #endif
4271 static struct file_system_type ext4_fs_type = {
4272 .owner = THIS_MODULE,
4273 .name = "ext4",
4274 .get_sb = ext4_get_sb,
4275 .kill_sb = kill_block_super,
4276 .fs_flags = FS_REQUIRES_DEV,
4277 };
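/*
 * Module init: set up the block-validity system zone cache, the
 * /sys/fs/ext4 kset, the /proc/fs/ext4 directory, mballoc, xattr and
 * the inode cache, then register the ext2/ext3 aliases and the ext4
 * filesystem type. Each step is unwound on failure.
 */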
4279 static int __init init_ext4_fs(void)
4281 int err;
4283 ext4_check_flag_values();
4284 err = init_ext4_system_zone();
4285 if (err)
4286 return err;
4287 ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj);
4288 if (!ext4_kset)
4289 goto out4;
4290 ext4_proc_root = proc_mkdir("fs/ext4", NULL);
4291 err = init_ext4_mballoc();
4292 if (err)
4293 goto out3;
4295 err = init_ext4_xattr();
4296 if (err)
4297 goto out2;
4298 err = init_inodecache();
4299 if (err)
4300 goto out1;
4301 register_as_ext2();
4302 register_as_ext3();
4303 err = register_filesystem(&ext4_fs_type);
4304 if (err)
4305 goto out;
4306 return 0;
4307 out:
4308 unregister_as_ext2();
4309 unregister_as_ext3();
4310 destroy_inodecache();
4311 out1:
4312 exit_ext4_xattr();
4313 out2:
4314 exit_ext4_mballoc();
4315 out3:
4316 remove_proc_entry("fs/ext4", NULL);
4317 kset_unregister(ext4_kset);
4318 out4:
4319 exit_ext4_system_zone();
4320 return err;
4321 }
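/*
 * Module exit: tear everything down in the reverse order of
 * init_ext4_fs().
 */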
4323 static void __exit exit_ext4_fs(void)
4325 unregister_as_ext2();
4326 unregister_as_ext3();
4327 unregister_filesystem(&ext4_fs_type);
4328 destroy_inodecache();
4329 exit_ext4_xattr();
4330 exit_ext4_mballoc();
4331 remove_proc_entry("fs/ext4", NULL);
4332 kset_unregister(ext4_kset);
4333 exit_ext4_system_zone();
4334 }
4336 MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
4337 MODULE_DESCRIPTION("Fourth Extended Filesystem");
4338 MODULE_LICENSE("GPL");
4339 module_init(init_ext4_fs)
4340 module_exit(exit_ext4_fs)