fs/jfs/super.c  [linux-2.6/mini2440.git]

/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/completion.h>
#include <linux/vfs.h>
#include <linux/quotaops.h>
#include <linux/mount.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>

#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_inode.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_acl.h"
#include "jfs_debug.h"

MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
MODULE_LICENSE("GPL");

static struct kmem_cache *jfs_inode_cachep;

static const struct super_operations jfs_super_operations;
static const struct export_operations jfs_export_operations;
static struct file_system_type jfs_fs_type;

#define MAX_COMMIT_THREADS 64
static int commit_threads;
module_param(commit_threads, int, 0);
MODULE_PARM_DESC(commit_threads, "Number of commit threads");

static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS];
struct task_struct *jfsIOthread;
struct task_struct *jfsSyncThread;

#ifdef CONFIG_JFS_DEBUG
int jfsloglevel = JFS_LOGLEVEL_WARN;
module_param(jfsloglevel, int, 0644);
MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)");
#endif
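
/*
 * jfs_handle_error() applies the error policy selected with the errors=
 * mount option: the superblock is flagged dirty on disk, then the
 * filesystem either panics, is remounted read-only, or simply continues.
 * Nothing is done if the filesystem is already mounted read-only.
 */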
static void jfs_handle_error(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	if (sb->s_flags & MS_RDONLY)
		return;

	updateSuper(sb, FM_DIRTY);

	if (sbi->flag & JFS_ERR_PANIC)
		panic("JFS (device %s): panic forced after error\n",
			sb->s_id);
	else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
		jfs_err("ERROR: (device %s): remounting filesystem "
			"as read-only\n",
			sb->s_id);
		sb->s_flags |= MS_RDONLY;
	}

	/* nothing is done for continue beyond marking the superblock dirty */
}

void jfs_error(struct super_block *sb, const char * function, ...)
{
	static char error_buf[256];
	va_list args;

	va_start(args, function);
	vsnprintf(error_buf, sizeof(error_buf), function, args);
	va_end(args);

	printk(KERN_ERR "ERROR: (device %s): %s\n", sb->s_id, error_buf);

	jfs_handle_error(sb);
}

static struct inode *jfs_alloc_inode(struct super_block *sb)
{
	struct jfs_inode_info *jfs_inode;

	jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS);
	if (!jfs_inode)
		return NULL;
	return &jfs_inode->vfs_inode;
}
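
/*
 * jfs_destroy_inode() returns the in-core inode to the slab cache,
 * dropping its active allocation-group reference and releasing any
 * cached POSIX ACLs first.
 */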
static void jfs_destroy_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	BUG_ON(!list_empty(&ji->anon_inode_list));

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);

#ifdef CONFIG_JFS_POSIX_ACL
	if (ji->i_acl != JFS_ACL_NOT_CACHED) {
		posix_acl_release(ji->i_acl);
		ji->i_acl = JFS_ACL_NOT_CACHED;
	}
	if (ji->i_default_acl != JFS_ACL_NOT_CACHED) {
		posix_acl_release(ji->i_default_acl);
		ji->i_default_acl = JFS_ACL_NOT_CACHED;
	}
#endif

	kmem_cache_free(jfs_inode_cachep, ji);
}

static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
	s64 maxinodes;
	struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;

	jfs_info("In jfs_statfs");
	buf->f_type = JFS_SUPER_MAGIC;
	buf->f_bsize = sbi->bsize;
	buf->f_blocks = sbi->bmap->db_mapsize;
	buf->f_bfree = sbi->bmap->db_nfree;
	buf->f_bavail = sbi->bmap->db_nfree;
	/*
	 * If we really return the number of allocated & free inodes, some
	 * applications will fail because they won't see enough free inodes.
	 * We'll try to calculate some guess as to how many inodes we can
	 * really allocate
	 *
	 * buf->f_files = atomic_read(&imap->im_numinos);
	 * buf->f_ffree = atomic_read(&imap->im_numfree);
	 */
	maxinodes = min((s64) atomic_read(&imap->im_numinos) +
			((sbi->bmap->db_nfree >> imap->im_l2nbperiext)
			 << L2INOSPEREXT), (s64) 0xffffffffLL);
	buf->f_files = maxinodes;
	buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
				    atomic_read(&imap->im_numfree));

	buf->f_namelen = JFS_NAME_MAX;
	return 0;
}

static void jfs_put_super(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int rc;

	jfs_info("In jfs_put_super");
	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);
	if (sbi->nls_tab)
		unload_nls(sbi->nls_tab);
	sbi->nls_tab = NULL;

	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	iput(sbi->direct_inode);
	sbi->direct_inode = NULL;

	kfree(sbi);
}
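
/*
 * Mount option tokens recognized by parse_options() below.
 */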
enum {
	Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
	Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask
};

static const match_table_t tokens = {
	{Opt_integrity, "integrity"},
	{Opt_nointegrity, "nointegrity"},
	{Opt_iocharset, "iocharset=%s"},
	{Opt_resize, "resize=%u"},
	{Opt_resize_nosize, "resize"},
	{Opt_errors, "errors=%s"},
	{Opt_ignore, "noquota"},
	{Opt_ignore, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_uid, "uid=%u"},
	{Opt_gid, "gid=%u"},
	{Opt_umask, "umask=%u"},
	{Opt_err, NULL}
};
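
/*
 * parse_options() walks the comma-separated mount option string, updating
 * *flag, *newLVSize and the uid/gid/umask/nls fields of the jfs_sb_info.
 * Returns 1 on success and 0 on an invalid or unknown option.
 */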
static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
			 int *flag)
{
	void *nls_map = (void *)-1;	/* -1: no change;  NULL: none */
	char *p;
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	*newLVSize = 0;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		substring_t args[MAX_OPT_ARGS];
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_integrity:
			*flag &= ~JFS_NOINTEGRITY;
			break;
		case Opt_nointegrity:
			*flag |= JFS_NOINTEGRITY;
			break;
		case Opt_ignore:
			/* Silently ignore the quota options */
			/* Don't do anything ;-) */
			break;
		case Opt_iocharset:
			if (nls_map && nls_map != (void *) -1)
				unload_nls(nls_map);
			if (!strcmp(args[0].from, "none"))
				nls_map = NULL;
			else {
				nls_map = load_nls(args[0].from);
				if (!nls_map) {
					printk(KERN_ERR
					       "JFS: charset not found\n");
					goto cleanup;
				}
			}
			break;
		case Opt_resize:
		{
			char *resize = args[0].from;
			*newLVSize = simple_strtoull(resize, &resize, 0);
			break;
		}
		case Opt_resize_nosize:
		{
			*newLVSize = sb->s_bdev->bd_inode->i_size >>
				sb->s_blocksize_bits;
			if (*newLVSize == 0)
				printk(KERN_ERR
				       "JFS: Cannot determine volume size\n");
			break;
		}
		case Opt_errors:
		{
			char *errors = args[0].from;
			if (!errors || !*errors)
				goto cleanup;
			if (!strcmp(errors, "continue")) {
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_CONTINUE;
			} else if (!strcmp(errors, "remount-ro")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_REMOUNT_RO;
			} else if (!strcmp(errors, "panic")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag |= JFS_ERR_PANIC;
			} else {
				printk(KERN_ERR
				       "JFS: %s is an invalid error handler\n",
				       errors);
				goto cleanup;
			}
			break;
		}

#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			*flag |= JFS_USRQUOTA;
			break;
		case Opt_grpquota:
			*flag |= JFS_GRPQUOTA;
			break;
#else
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_quota:
			printk(KERN_ERR
			       "JFS: quota operations not supported\n");
			break;
#endif
		case Opt_uid:
		{
			char *uid = args[0].from;
			sbi->uid = simple_strtoul(uid, &uid, 0);
			break;
		}
		case Opt_gid:
		{
			char *gid = args[0].from;
			sbi->gid = simple_strtoul(gid, &gid, 0);
			break;
		}
		case Opt_umask:
		{
			char *umask = args[0].from;
			sbi->umask = simple_strtoul(umask, &umask, 8);
			if (sbi->umask & ~0777) {
				printk(KERN_ERR
				       "JFS: Invalid value of umask\n");
				goto cleanup;
			}
			break;
		}
		default:
			printk("jfs: Unrecognized mount option \"%s\" "
			       " or missing value\n", p);
			goto cleanup;
		}
	}

	if (nls_map != (void *) -1) {
		/* Discard old (if remount) */
		if (sbi->nls_tab)
			unload_nls(sbi->nls_tab);
		sbi->nls_tab = nls_map;
	}
	return 1;

cleanup:
	if (nls_map && nls_map != (void *) -1)
		unload_nls(nls_map);
	return 0;
}
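
/*
 * jfs_remount() handles "mount -o remount": an optional volume resize,
 * transitions between read-only and read-write, and toggling of the
 * nointegrity flag, which requires the journal to be stopped and
 * restarted.
 */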
static int jfs_remount(struct super_block *sb, int *flags, char *data)
{
	s64 newLVSize = 0;
	int rc = 0;
	int flag = JFS_SBI(sb)->flag;

	if (!parse_options(data, sb, &newLVSize, &flag)) {
		return -EINVAL;
	}
	if (newLVSize) {
		if (sb->s_flags & MS_RDONLY) {
			printk(KERN_ERR
			       "JFS: resize requires volume to be mounted read-write\n");
			return -EROFS;
		}
		rc = jfs_extendfs(sb, newLVSize, 0);
		if (rc)
			return rc;
	}

	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
		/*
		 * Invalidate any previously read metadata.  fsck may have
		 * changed the on-disk data since we mounted r/o
		 */
		truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0);

		JFS_SBI(sb)->flag = flag;
		return jfs_mount_rw(sb, 1);
	}
	if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) {
		rc = jfs_umount_rw(sb);
		JFS_SBI(sb)->flag = flag;
		return rc;
	}
	if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
		if (!(sb->s_flags & MS_RDONLY)) {
			rc = jfs_umount_rw(sb);
			if (rc)
				return rc;
			JFS_SBI(sb)->flag = flag;
			return jfs_mount_rw(sb, 1);
		}
	JFS_SBI(sb)->flag = flag;

	return 0;
}
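
/*
 * jfs_fill_super() builds the in-core superblock at mount time: it
 * allocates the jfs_sb_info, parses mount options, sets up the
 * direct-mapping inode used for metadata I/O, mounts the aggregate
 * (and the journal unless read-only) and instantiates the root dentry.
 */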
static int jfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jfs_sb_info *sbi;
	struct inode *inode;
	int rc;
	s64 newLVSize = 0;
	int flag, ret = -EINVAL;

	jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);

	if (!new_valid_dev(sb->s_bdev->bd_dev))
		return -EOVERFLOW;

	sbi = kzalloc(sizeof (struct jfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;
	sbi->sb = sb;
	sbi->uid = sbi->gid = sbi->umask = -1;

	/* initialize the mount flag and determine the default error handler */
	flag = JFS_ERR_REMOUNT_RO;

	if (!parse_options((char *) data, sb, &newLVSize, &flag)) {
		kfree(sbi);
		return -EINVAL;
	}
	sbi->flag = flag;

#ifdef CONFIG_JFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	if (newLVSize) {
		printk(KERN_ERR "resize option for remount only\n");
		goto out_kfree;	/* ret is already -EINVAL */
	}

	/*
	 * Initialize blocksize to 4K.
	 */
	sb_set_blocksize(sb, PSIZE);

	/*
	 * Set method vectors.
	 */
	sb->s_op = &jfs_super_operations;
	sb->s_export_op = &jfs_export_operations;

	/*
	 * Initialize direct-mapping inode/address-space
	 */
	inode = new_inode(sb);
	if (inode == NULL) {
		ret = -ENOMEM;
		goto out_kfree;
	}
	inode->i_ino = 0;
	inode->i_nlink = 1;
	inode->i_size = sb->s_bdev->bd_inode->i_size;
	inode->i_mapping->a_ops = &jfs_metapage_aops;
	insert_inode_hash(inode);
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

	sbi->direct_inode = inode;

	rc = jfs_mount(sb);
	if (rc) {
		if (!silent) {
			jfs_err("jfs_mount failed w/return code = %d", rc);
		}
		goto out_mount_failed;
	}
	if (sb->s_flags & MS_RDONLY)
		sbi->log = NULL;
	else {
		rc = jfs_mount_rw(sb, 0);
		if (rc) {
			if (!silent) {
				jfs_err("jfs_mount_rw failed, return code = %d",
					rc);
			}
			goto out_no_rw;
		}
	}

	sb->s_magic = JFS_SUPER_MAGIC;

	inode = jfs_iget(sb, ROOT_I);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto out_no_rw;
	}
	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root)
		goto out_no_root;

	if (sbi->mntflag & JFS_OS2)
		sb->s_root->d_op = &jfs_ci_dentry_operations;

	/* logical blocks are represented by 40 bits in pxd_t, etc. */
	sb->s_maxbytes = ((u64) sb->s_blocksize) << 40;
#if BITS_PER_LONG == 32
	/*
	 * Page cache is indexed by long.
	 * I would use MAX_LFS_FILESIZE, but it's only half as big
	 */
	sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, sb->s_maxbytes);
#endif
	sb->s_time_gran = 1;
	return 0;

out_no_root:
	jfs_err("jfs_read_super: get root dentry failed");
	iput(inode);

out_no_rw:
	rc = jfs_umount(sb);
	if (rc) {
		jfs_err("jfs_umount failed with return code %d", rc);
	}
out_mount_failed:
	filemap_write_and_wait(sbi->direct_inode->i_mapping);
	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	make_bad_inode(sbi->direct_inode);
	iput(sbi->direct_inode);
	sbi->direct_inode = NULL;
out_kfree:
	if (sbi->nls_tab)
		unload_nls(sbi->nls_tab);
	kfree(sbi);
	return ret;
}
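
/*
 * freeze_fs/unfreeze_fs support: freezing quiesces the transaction
 * manager, shuts down the log and marks the superblock clean;
 * unfreezing reverses the process.
 */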
static int jfs_freeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;

	if (!(sb->s_flags & MS_RDONLY)) {
		txQuiesce(sb);
		lmLogShutdown(log);
		updateSuper(sb, FM_CLEAN);
	}
	return 0;
}

static int jfs_unfreeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!(sb->s_flags & MS_RDONLY)) {
		updateSuper(sb, FM_MOUNT);
		if ((rc = lmLogInit(log)))
			jfs_err("jfs_unlock failed with return code %d", rc);
		else
			txResume(sb);
	}
	return 0;
}

static int jfs_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, jfs_fill_super,
			   mnt);
}
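
/* Flush the journal on sync_fs; log is NULL for a read-only mount. */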
static int jfs_sync_fs(struct super_block *sb, int wait)
{
	struct jfs_log *log = JFS_SBI(sb)->log;

	/* log == NULL indicates read-only mount */
	if (log) {
		jfs_flush_journal(log, wait);
		jfs_syncpt(log, 0);
	}

	return 0;
}

static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct jfs_sb_info *sbi = JFS_SBI(vfs->mnt_sb);

	if (sbi->uid != -1)
		seq_printf(seq, ",uid=%d", sbi->uid);
	if (sbi->gid != -1)
		seq_printf(seq, ",gid=%d", sbi->gid);
	if (sbi->umask != -1)
		seq_printf(seq, ",umask=%03o", sbi->umask);
	if (sbi->flag & JFS_NOINTEGRITY)
		seq_puts(seq, ",nointegrity");
	if (sbi->nls_tab)
		seq_printf(seq, ",iocharset=%s", sbi->nls_tab->charset);
	if (sbi->flag & JFS_ERR_CONTINUE)
		seq_printf(seq, ",errors=continue");
	if (sbi->flag & JFS_ERR_PANIC)
		seq_printf(seq, ",errors=panic");

#ifdef CONFIG_QUOTA
	if (sbi->flag & JFS_USRQUOTA)
		seq_puts(seq, ",usrquota");

	if (sbi->flag & JFS_GRPQUOTA)
		seq_puts(seq, ",grpquota");
#endif

	return 0;
}

#ifdef CONFIG_QUOTA

/*
 * Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races.
 */
static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
			      size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = 1 << inode->i_blkbits;
		err = jfs_get_block(inode, blk, &tmp_bh, 0);
		if (err)
			return err;
		if (!buffer_mapped(&tmp_bh))	/* A hole? */
			memset(data, 0, tocopy);
		else {
			bh = sb_bread(sb, tmp_bh.b_blocknr);
			if (!bh)
				return -EIO;
			memcpy(data, bh->b_data+offset, tocopy);
			brelse(bh);
		}
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile */
static ssize_t jfs_quota_write(struct super_block *sb, int type,
			       const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t towrite = len;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;

	mutex_lock(&inode->i_mutex);
	while (towrite > 0) {
		tocopy = sb->s_blocksize - offset < towrite ?
				sb->s_blocksize - offset : towrite;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = 1 << inode->i_blkbits;
		err = jfs_get_block(inode, blk, &tmp_bh, 1);
		if (err)
			goto out;
		if (offset || tocopy != sb->s_blocksize)
			bh = sb_bread(sb, tmp_bh.b_blocknr);
		else
			bh = sb_getblk(sb, tmp_bh.b_blocknr);
		if (!bh) {
			err = -EIO;
			goto out;
		}
		lock_buffer(bh);
		memcpy(bh->b_data+offset, data, tocopy);
		flush_dcache_page(bh->b_page);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		brelse(bh);
		offset = 0;
		towrite -= tocopy;
		data += tocopy;
		blk++;
	}
out:
	if (len == towrite) {
		/* nothing was written; drop i_mutex before returning the error */
		mutex_unlock(&inode->i_mutex);
		return err;
	}
	if (inode->i_size < off+len-towrite)
		i_size_write(inode, off+len-towrite);
	inode->i_version++;
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	mutex_unlock(&inode->i_mutex);
	return len - towrite;
}

#endif

static const struct super_operations jfs_super_operations = {
	.alloc_inode	= jfs_alloc_inode,
	.destroy_inode	= jfs_destroy_inode,
	.dirty_inode	= jfs_dirty_inode,
	.write_inode	= jfs_write_inode,
	.delete_inode	= jfs_delete_inode,
	.put_super	= jfs_put_super,
	.sync_fs	= jfs_sync_fs,
	.freeze_fs	= jfs_freeze,
	.unfreeze_fs	= jfs_unfreeze,
	.statfs		= jfs_statfs,
	.remount_fs	= jfs_remount,
	.show_options	= jfs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= jfs_quota_read,
	.quota_write	= jfs_quota_write,
#endif
};

static const struct export_operations jfs_export_operations = {
	.fh_to_dentry	= jfs_fh_to_dentry,
	.fh_to_parent	= jfs_fh_to_parent,
	.get_parent	= jfs_get_parent,
};

static struct file_system_type jfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "jfs",
	.get_sb		= jfs_get_sb,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
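
/*
 * Slab constructor for jfs_ip objects: initialize the locks, list head
 * and ACL/AG fields that every in-core JFS inode starts out with.
 */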
static void init_once(void *foo)
{
	struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;

	memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
	INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
	init_rwsem(&jfs_ip->rdwrlock);
	mutex_init(&jfs_ip->commit_mutex);
	init_rwsem(&jfs_ip->xattr_sem);
	spin_lock_init(&jfs_ip->ag_lock);
	jfs_ip->active_ag = -1;
#ifdef CONFIG_JFS_POSIX_ACL
	jfs_ip->i_acl = JFS_ACL_NOT_CACHED;
	jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED;
#endif
	inode_init_once(&jfs_ip->vfs_inode);
}

static int __init init_jfs_fs(void)
{
	int i;
	int rc;

	jfs_inode_cachep =
	    kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
			    SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
			    init_once);
	if (jfs_inode_cachep == NULL)
		return -ENOMEM;

	/*
	 * Metapage initialization
	 */
	rc = metapage_init();
	if (rc) {
		jfs_err("metapage_init failed w/rc = %d", rc);
		goto free_slab;
	}

	/*
	 * Transaction Manager initialization
	 */
	rc = txInit();
	if (rc) {
		jfs_err("txInit failed w/rc = %d", rc);
		goto free_metapage;
	}

	/*
	 * I/O completion thread (endio)
	 */
	jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO");
	if (IS_ERR(jfsIOthread)) {
		rc = PTR_ERR(jfsIOthread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto end_txmngr;
	}

	if (commit_threads < 1)
		commit_threads = num_online_cpus();
	if (commit_threads > MAX_COMMIT_THREADS)
		commit_threads = MAX_COMMIT_THREADS;

	for (i = 0; i < commit_threads; i++) {
		jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL, "jfsCommit");
		if (IS_ERR(jfsCommitThread[i])) {
			rc = PTR_ERR(jfsCommitThread[i]);
			jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
			commit_threads = i;
			goto kill_committask;
		}
	}

	jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync");
	if (IS_ERR(jfsSyncThread)) {
		rc = PTR_ERR(jfsSyncThread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto kill_committask;
	}

#ifdef PROC_FS_JFS
	jfs_proc_init();
#endif

	return register_filesystem(&jfs_fs_type);

kill_committask:
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsIOthread);
end_txmngr:
	txExit();
free_metapage:
	metapage_exit();
free_slab:
	kmem_cache_destroy(jfs_inode_cachep);
	return rc;
}

static void __exit exit_jfs_fs(void)
{
	int i;

	jfs_info("exit_jfs_fs called");

	txExit();
	metapage_exit();

	kthread_stop(jfsIOthread);
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsSyncThread);
#ifdef PROC_FS_JFS
	jfs_proc_clean();
#endif
	unregister_filesystem(&jfs_fs_type);
	kmem_cache_destroy(jfs_inode_cachep);
}

module_init(init_jfs_fs)
module_exit(exit_jfs_fs)