/*
 * Copyright (C) International Business Machines Corp., 2000-2004
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/completion.h>
#include <linux/vfs.h>
#include <linux/quotaops.h>
#include <linux/mount.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_inode.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_debug.h"
MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
MODULE_LICENSE("GPL");
static struct kmem_cache *jfs_inode_cachep;

static const struct super_operations jfs_super_operations;
static const struct export_operations jfs_export_operations;
static struct file_system_type jfs_fs_type;
#define MAX_COMMIT_THREADS 64
static int commit_threads;
module_param(commit_threads, int, 0);
MODULE_PARM_DESC(commit_threads, "Number of commit threads");
static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS];
struct task_struct *jfsIOthread;
struct task_struct *jfsSyncThread;
#ifdef CONFIG_JFS_DEBUG
int jfsloglevel = JFS_LOGLEVEL_WARN;
module_param(jfsloglevel, int, 0644);
MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)");
#endif
static void jfs_handle_error(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	if (sb->s_flags & MS_RDONLY)
		return;

	updateSuper(sb, FM_DIRTY);

	if (sbi->flag & JFS_ERR_PANIC)
		panic("JFS (device %s): panic forced after error\n",
		      sb->s_id);
	else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
		jfs_err("ERROR: (device %s): remounting filesystem as read-only\n",
			sb->s_id);
		sb->s_flags |= MS_RDONLY;
	}

	/* nothing is done for continue beyond marking the superblock dirty */
}
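
/*
 * jfs_error() - report a filesystem error
 *
 * Formats the message, logs it with the device name, and then invokes
 * jfs_handle_error() to apply the configured error policy (continue,
 * remount read-only, or panic).
 */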
void jfs_error(struct super_block *sb, const char *function, ...)
{
	static char error_buf[256];
	va_list args;

	va_start(args, function);
	vsnprintf(error_buf, sizeof(error_buf), function, args);
	va_end(args);

	pr_err("ERROR: (device %s): %s\n", sb->s_id, error_buf);

	jfs_handle_error(sb);
}
static struct inode *jfs_alloc_inode(struct super_block *sb)
{
	struct jfs_inode_info *jfs_inode;

	jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS);
	if (!jfs_inode)
		return NULL;
	return &jfs_inode->vfs_inode;
}
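
/*
 * RCU callback: release the jfs_inode_info back to the slab cache once
 * all RCU readers that may still reference the inode have finished.
 */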
static void jfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct jfs_inode_info *ji = JFS_IP(inode);

	kmem_cache_free(jfs_inode_cachep, ji);
}
static void jfs_destroy_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	BUG_ON(!list_empty(&ji->anon_inode_list));

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);
	call_rcu(&inode->i_rcu, jfs_i_callback);
}
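
/*
 * Fill in statfs data from the block allocation map; since JFS
 * allocates inodes dynamically, f_files/f_ffree are estimates derived
 * from the inode map and the free extent count.
 */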
static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
	s64 maxinodes;
	struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;

	jfs_info("In jfs_statfs");
	buf->f_type = JFS_SUPER_MAGIC;
	buf->f_bsize = sbi->bsize;
	buf->f_blocks = sbi->bmap->db_mapsize;
	buf->f_bfree = sbi->bmap->db_nfree;
	buf->f_bavail = sbi->bmap->db_nfree;
	/*
	 * If we really return the number of allocated & free inodes, some
	 * applications will fail because they won't see enough free inodes.
	 * We'll try to calculate some guess as to how many inodes we can
	 * really allocate
	 *
	 * buf->f_files = atomic_read(&imap->im_numinos);
	 * buf->f_ffree = atomic_read(&imap->im_numfree);
	 */
	maxinodes = min((s64) atomic_read(&imap->im_numinos) +
			((sbi->bmap->db_nfree >> imap->im_l2nbperiext)
			 << L2INOSPEREXT), (s64) 0xffffffffLL);
	buf->f_files = maxinodes;
	buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
				    atomic_read(&imap->im_numfree));
	buf->f_fsid.val[0] = (u32)crc32_le(0, sbi->uuid, sizeof(sbi->uuid)/2);
	buf->f_fsid.val[1] = (u32)crc32_le(0, sbi->uuid + sizeof(sbi->uuid)/2,
					   sizeof(sbi->uuid)/2);

	buf->f_namelen = JFS_NAME_MAX;
	return 0;
}
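
/*
 * Tear down a mount: disable quotas, unmount the journal, drop the NLS
 * table and the direct-mapping inode used for metadata I/O.
 */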
static void jfs_put_super(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int rc;

	jfs_info("In jfs_put_super");

	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);

	unload_nls(sbi->nls_tab);

	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	iput(sbi->direct_inode);

	kfree(sbi);
}
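
/* Mount options recognized by parse_options() */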
enum {
	Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
	Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask,
	Opt_discard, Opt_nodiscard, Opt_discard_minblk
};

static const match_table_t tokens = {
	{Opt_integrity, "integrity"},
	{Opt_nointegrity, "nointegrity"},
	{Opt_iocharset, "iocharset=%s"},
	{Opt_resize, "resize=%u"},
	{Opt_resize_nosize, "resize"},
	{Opt_errors, "errors=%s"},
	{Opt_ignore, "noquota"},
	{Opt_ignore, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_uid, "uid=%u"},
	{Opt_gid, "gid=%u"},
	{Opt_umask, "umask=%u"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_discard_minblk, "discard=%u"},
	{Opt_err, NULL}
};
static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
			 int *flag)
{
	void *nls_map = (void *)-1;	/* -1: no change; NULL: none */
	char *p;
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	*newLVSize = 0;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		substring_t args[MAX_OPT_ARGS];
		int token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_integrity:
			*flag &= ~JFS_NOINTEGRITY;
			break;
		case Opt_nointegrity:
			*flag |= JFS_NOINTEGRITY;
			break;
		case Opt_ignore:
			/* Silently ignore the quota options */
			/* Don't do anything ;-) */
			break;
		case Opt_iocharset:
			if (nls_map && nls_map != (void *) -1)
				unload_nls(nls_map);
			if (!strcmp(args[0].from, "none"))
				nls_map = NULL;
			else {
				nls_map = load_nls(args[0].from);
				if (!nls_map) {
					pr_err("JFS: charset not found\n");
					goto cleanup;
				}
			}
			break;
		case Opt_resize:
		{
			char *resize = args[0].from;
			*newLVSize = simple_strtoull(resize, &resize, 0);
			break;
		}
		case Opt_resize_nosize:
		{
			*newLVSize = sb->s_bdev->bd_inode->i_size >>
				sb->s_blocksize_bits;
			if (*newLVSize == 0)
				pr_err("JFS: Cannot determine volume size\n");
			break;
		}
		case Opt_errors:
		{
			char *errors = args[0].from;
			if (!errors || !*errors)
				goto cleanup;
			if (!strcmp(errors, "continue")) {
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_CONTINUE;
			} else if (!strcmp(errors, "remount-ro")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_REMOUNT_RO;
			} else if (!strcmp(errors, "panic")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag |= JFS_ERR_PANIC;
			} else {
				pr_err("JFS: %s is an invalid error handler\n",
				       errors);
				goto cleanup;
			}
			break;
		}

#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			*flag |= JFS_USRQUOTA;
			break;
		case Opt_grpquota:
			*flag |= JFS_GRPQUOTA;
			break;
#else
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_quota:
			pr_err("JFS: quota operations not supported\n");
			break;
#endif
		case Opt_uid:
		{
			char *uid = args[0].from;
			uid_t val = simple_strtoul(uid, &uid, 0);
			sbi->uid = make_kuid(current_user_ns(), val);
			if (!uid_valid(sbi->uid))
				goto cleanup;
			break;
		}
		case Opt_gid:
		{
			char *gid = args[0].from;
			gid_t val = simple_strtoul(gid, &gid, 0);
			sbi->gid = make_kgid(current_user_ns(), val);
			if (!gid_valid(sbi->gid))
				goto cleanup;
			break;
		}
		case Opt_umask:
		{
			char *umask = args[0].from;
			sbi->umask = simple_strtoul(umask, &umask, 8);
			if (sbi->umask & ~0777) {
				pr_err("JFS: Invalid value of umask\n");
				goto cleanup;
			}
			break;
		}
		case Opt_discard:
		{
			struct request_queue *q = bdev_get_queue(sb->s_bdev);
			/* if set to 1, even copying files will cause
			 * trimming :O
			 * -> user has more control over the online trimming
			 */
			sbi->minblks_trim = 64;
			if (blk_queue_discard(q))
				*flag |= JFS_DISCARD;
			else
				pr_err("JFS: discard option not supported on device\n");
			break;
		}
		case Opt_nodiscard:
			*flag &= ~JFS_DISCARD;
			break;
		case Opt_discard_minblk:
		{
			struct request_queue *q = bdev_get_queue(sb->s_bdev);
			char *minblks_trim = args[0].from;
			if (blk_queue_discard(q)) {
				*flag |= JFS_DISCARD;
				sbi->minblks_trim = simple_strtoull(
					minblks_trim, &minblks_trim, 0);
			} else
				pr_err("JFS: discard option not supported on device\n");
			break;
		}
		default:
			printk("jfs: Unrecognized mount option \"%s\" or missing value\n",
			       p);
			goto cleanup;
		}
	}

	if (nls_map != (void *) -1) {
		/* Discard old (if remount) */
		unload_nls(sbi->nls_tab);
		sbi->nls_tab = nls_map;
	}
	return 1;

cleanup:
	if (nls_map && nls_map != (void *) -1)
		unload_nls(nls_map);
	return 0;
}
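
/*
 * Handle "mount -o remount": re-parse the options, grow the volume if
 * a resize was requested, and switch between read-only and read-write,
 * suspending or resuming quotas as appropriate.
 */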
static int jfs_remount(struct super_block *sb, int *flags, char *data)
{
	s64 newLVSize = 0;
	int rc = 0;
	int flag = JFS_SBI(sb)->flag;
	int ret;

	if (!parse_options(data, sb, &newLVSize, &flag))
		return -EINVAL;

	if (newLVSize) {
		if (sb->s_flags & MS_RDONLY) {
			pr_err("JFS: resize requires volume to be mounted read-write\n");
			return -EROFS;
		}
		rc = jfs_extendfs(sb, newLVSize, 0);
		if (rc)
			return rc;
	}

	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
		/*
		 * Invalidate any previously read metadata.  fsck may have
		 * changed the on-disk data since we mounted r/o
		 */
		truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0);

		JFS_SBI(sb)->flag = flag;
		ret = jfs_mount_rw(sb, 1);

		/* mark the fs r/w for quota activity */
		sb->s_flags &= ~MS_RDONLY;

		dquot_resume(sb, -1);
		return ret;
	}
	if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) {
		rc = dquot_suspend(sb, -1);
		if (rc < 0)
			return rc;
		rc = jfs_umount_rw(sb);
		JFS_SBI(sb)->flag = flag;
		return rc;
	}
	if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
		if (!(sb->s_flags & MS_RDONLY)) {
			rc = jfs_umount_rw(sb);
			if (rc)
				return rc;

			JFS_SBI(sb)->flag = flag;
			ret = jfs_mount_rw(sb, 1);
			return ret;
		}
	JFS_SBI(sb)->flag = flag;

	return 0;
}
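
/*
 * Fill in the superblock for a new mount: parse options, set up the
 * direct-mapping inode used for metadata I/O, mount the journal, and
 * look up the root inode.
 */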
static int jfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jfs_sb_info *sbi;
	struct inode *inode;
	int rc;
	s64 newLVSize = 0;
	int flag, ret = -EINVAL;

	jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);

	if (!new_valid_dev(sb->s_bdev->bd_dev))
		return -EOVERFLOW;

	sbi = kzalloc(sizeof(struct jfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	sb->s_max_links = JFS_LINK_MAX;
	sbi->sb = sb;
	sbi->uid = INVALID_UID;
	sbi->gid = INVALID_GID;
	sbi->umask = -1;

	/* initialize the mount flag and determine the default error handler */
	flag = JFS_ERR_REMOUNT_RO;

	if (!parse_options((char *) data, sb, &newLVSize, &flag))
		goto out_kfree;
	sbi->flag = flag;

#ifdef CONFIG_JFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	if (newLVSize) {
		pr_err("resize option for remount only\n");
		goto out_kfree;
	}

	/*
	 * Initialize blocksize to 4K.
	 */
	sb_set_blocksize(sb, PSIZE);

	/*
	 * Set method vectors.
	 */
	sb->s_op = &jfs_super_operations;
	sb->s_export_op = &jfs_export_operations;
	sb->dq_op = &dquot_operations;
	sb->s_qcop = &dquot_quotactl_ops;

	/*
	 * Initialize direct-mapping inode/address-space
	 */
	inode = new_inode(sb);
	if (inode == NULL) {
		ret = -ENOMEM;
		goto out_unload;
	}
	inode->i_size = sb->s_bdev->bd_inode->i_size;
	inode->i_mapping->a_ops = &jfs_metapage_aops;
	insert_inode_hash(inode);
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

	sbi->direct_inode = inode;

	rc = jfs_mount(sb);
	if (rc) {
		if (!silent)
			jfs_err("jfs_mount failed w/return code = %d", rc);
		goto out_mount_failed;
	}
	if (sb->s_flags & MS_RDONLY)
		sbi->log = NULL;
	else {
		rc = jfs_mount_rw(sb, 0);
		if (rc) {
			if (!silent)
				jfs_err("jfs_mount_rw failed, return code = %d",
					rc);
			goto out_no_rw;
		}
	}

	sb->s_magic = JFS_SUPER_MAGIC;

	if (sbi->mntflag & JFS_OS2)
		sb->s_d_op = &jfs_ci_dentry_operations;

	inode = jfs_iget(sb, ROOT_I);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto out_no_rw;
	}
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto out_no_root;

	/* logical blocks are represented by 40 bits in pxd_t, etc. */
	sb->s_maxbytes = ((u64) sb->s_blocksize) << 40;
#if BITS_PER_LONG == 32
	/*
	 * Page cache is indexed by long.
	 * I would use MAX_LFS_FILESIZE, but it's only half as big
	 */
	sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1,
			     (u64)sb->s_maxbytes);
#endif
	return 0;

out_no_root:
	jfs_err("jfs_read_super: get root dentry failed");

out_no_rw:
	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);
out_mount_failed:
	filemap_write_and_wait(sbi->direct_inode->i_mapping);
	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	make_bad_inode(sbi->direct_inode);
	iput(sbi->direct_inode);
	sbi->direct_inode = NULL;
out_unload:
	unload_nls(sbi->nls_tab);
out_kfree:
	kfree(sbi);
	return ret;
}
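
/*
 * Freeze/thaw support: on freeze, quiesce transactions, shut down the
 * log and mark the superblock clean so a snapshot of the device is
 * consistent; unfreeze reverses the process.
 */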
static int jfs_freeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;

	if (!(sb->s_flags & MS_RDONLY)) {
		txQuiesce(sb);
		lmLogShutdown(log);
		updateSuper(sb, FM_CLEAN);
	}
	return 0;
}
static int jfs_unfreeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!(sb->s_flags & MS_RDONLY)) {
		updateSuper(sb, FM_MOUNT);
		if ((rc = lmLogInit(log)))
			jfs_err("jfs_unlock failed with return code %d", rc);
		else
			txResume(sb);
	}
	return 0;
}
static struct dentry *jfs_do_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, jfs_fill_super);
}
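
/*
 * ->sync_fs: write back dirty quota structures and flush the journal.
 * A NULL log means the filesystem is mounted read-only and there is
 * nothing to do.
 */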
static int jfs_sync_fs(struct super_block *sb, int wait)
{
	struct jfs_log *log = JFS_SBI(sb)->log;

	/* log == NULL indicates read-only mount */
	if (log) {
		/*
		 * Write quota structures to quota file, sync_blockdev() will
		 * write them to disk later
		 */
		dquot_writeback_dquots(sb, -1);
		jfs_flush_journal(log, wait);
	}
	return 0;
}
static int jfs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct jfs_sb_info *sbi = JFS_SBI(root->d_sb);

	if (uid_valid(sbi->uid))
		seq_printf(seq, ",uid=%d", from_kuid(&init_user_ns, sbi->uid));
	if (gid_valid(sbi->gid))
		seq_printf(seq, ",gid=%d", from_kgid(&init_user_ns, sbi->gid));
	if (sbi->umask != -1)
		seq_printf(seq, ",umask=%03o", sbi->umask);
	if (sbi->flag & JFS_NOINTEGRITY)
		seq_puts(seq, ",nointegrity");
	if (sbi->flag & JFS_DISCARD)
		seq_printf(seq, ",discard=%u", sbi->minblks_trim);
	if (sbi->nls_tab)
		seq_printf(seq, ",iocharset=%s", sbi->nls_tab->charset);
	if (sbi->flag & JFS_ERR_CONTINUE)
		seq_printf(seq, ",errors=continue");
	if (sbi->flag & JFS_ERR_PANIC)
		seq_printf(seq, ",errors=panic");

	if (sbi->flag & JFS_USRQUOTA)
		seq_puts(seq, ",usrquota");

	if (sbi->flag & JFS_GRPQUOTA)
		seq_puts(seq, ",grpquota");

	return 0;
}
/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
			      size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = 1 << inode->i_blkbits;
		err = jfs_get_block(inode, blk, &tmp_bh, 0);
		if (err)
			return err;
		if (!buffer_mapped(&tmp_bh))	/* A hole? */
			memset(data, 0, tocopy);
		else {
			bh = sb_bread(sb, tmp_bh.b_blocknr);
			if (!bh)
				return -EIO;
			memcpy(data, bh->b_data+offset, tocopy);
			brelse(bh);
		}
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}
/* Write to quotafile */
static ssize_t jfs_quota_write(struct super_block *sb, int type,
			       const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t towrite = len;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;

	mutex_lock(&inode->i_mutex);
	while (towrite > 0) {
		tocopy = sb->s_blocksize - offset < towrite ?
				sb->s_blocksize - offset : towrite;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = 1 << inode->i_blkbits;
		err = jfs_get_block(inode, blk, &tmp_bh, 1);
		if (err)
			goto out;
		if (offset || tocopy != sb->s_blocksize)
			bh = sb_bread(sb, tmp_bh.b_blocknr);
		else
			bh = sb_getblk(sb, tmp_bh.b_blocknr);
		if (!bh) {
			err = -EIO;
			goto out;
		}
		lock_buffer(bh);
		memcpy(bh->b_data+offset, data, tocopy);
		flush_dcache_page(bh->b_page);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		brelse(bh);
		offset = 0;
		towrite -= tocopy;
		data += tocopy;
		blk++;
	}
out:
	if (len == towrite) {
		mutex_unlock(&inode->i_mutex);
		return err;
	}
	if (inode->i_size < off+len-towrite)
		i_size_write(inode, off+len-towrite);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	mutex_unlock(&inode->i_mutex);
	return len - towrite;
}
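
/*
 * Operation tables wired into the VFS: superblock methods, NFS export
 * hooks, and the file_system_type registered at module init.
 */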
static const struct super_operations jfs_super_operations = {
	.alloc_inode	= jfs_alloc_inode,
	.destroy_inode	= jfs_destroy_inode,
	.dirty_inode	= jfs_dirty_inode,
	.write_inode	= jfs_write_inode,
	.evict_inode	= jfs_evict_inode,
	.put_super	= jfs_put_super,
	.sync_fs	= jfs_sync_fs,
	.freeze_fs	= jfs_freeze,
	.unfreeze_fs	= jfs_unfreeze,
	.statfs		= jfs_statfs,
	.remount_fs	= jfs_remount,
	.show_options	= jfs_show_options,
	.quota_read	= jfs_quota_read,
	.quota_write	= jfs_quota_write,
};

static const struct export_operations jfs_export_operations = {
	.fh_to_dentry	= jfs_fh_to_dentry,
	.fh_to_parent	= jfs_fh_to_parent,
	.get_parent	= jfs_get_parent,
};

static struct file_system_type jfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "jfs",
	.mount		= jfs_do_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
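
/*
 * Slab constructor: called when a new jfs_inode_info object is created
 * in the cache; initializes its lists, locks and embedded VFS inode.
 */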
static void init_once(void *foo)
{
	struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;

	memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
	INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
	init_rwsem(&jfs_ip->rdwrlock);
	mutex_init(&jfs_ip->commit_mutex);
	init_rwsem(&jfs_ip->xattr_sem);
	spin_lock_init(&jfs_ip->ag_lock);
	jfs_ip->active_ag = -1;
	inode_init_once(&jfs_ip->vfs_inode);
}
static int __init init_jfs_fs(void)
{
	int i;
	int rc;

	jfs_inode_cachep =
	    kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
			      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
			      init_once);
	if (jfs_inode_cachep == NULL)
		return -ENOMEM;

	/*
	 * Metapage initialization
	 */
	rc = metapage_init();
	if (rc) {
		jfs_err("metapage_init failed w/rc = %d", rc);
		goto free_slab;
	}

	/*
	 * Transaction Manager initialization
	 */
	rc = txInit();
	if (rc) {
		jfs_err("txInit failed w/rc = %d", rc);
		goto free_metapage;
	}

	/*
	 * I/O completion thread (endio)
	 */
	jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO");
	if (IS_ERR(jfsIOthread)) {
		rc = PTR_ERR(jfsIOthread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto end_txmngr;
	}

	if (commit_threads < 1)
		commit_threads = num_online_cpus();
	if (commit_threads > MAX_COMMIT_THREADS)
		commit_threads = MAX_COMMIT_THREADS;

	for (i = 0; i < commit_threads; i++) {
		jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL,
						 "jfsCommit");
		if (IS_ERR(jfsCommitThread[i])) {
			rc = PTR_ERR(jfsCommitThread[i]);
			jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
			commit_threads = i;
			goto kill_committask;
		}
	}

	jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync");
	if (IS_ERR(jfsSyncThread)) {
		rc = PTR_ERR(jfsSyncThread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto kill_committask;
	}

	rc = register_filesystem(&jfs_fs_type);
	if (!rc)
		return 0;

	kthread_stop(jfsSyncThread);
kill_committask:
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsIOthread);
end_txmngr:
	txExit();
free_metapage:
	metapage_exit();
free_slab:
	kmem_cache_destroy(jfs_inode_cachep);
	return rc;
}
static void __exit exit_jfs_fs(void)
{
	int i;

	jfs_info("exit_jfs_fs called");

	txExit();
	metapage_exit();

	kthread_stop(jfsIOthread);
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsSyncThread);

	unregister_filesystem(&jfs_fs_type);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(jfs_inode_cachep);
}
module_init(init_jfs_fs)
module_exit(exit_jfs_fs)