Remove references to CONFIG_PROFILE. Kernel profiling is no longer a
[linux-2.6/linux-mips.git] / fs / file_table.c
blobceb3b7069f78fcc1e7e8420ba03ce95e13e25233
1 /*
2 * linux/fs/file_table.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
6 */
8 #include <linux/string.h>
9 #include <linux/slab.h>
10 #include <linux/file.h>
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/smp_lock.h>
/* SLAB cache for filp's. */
static kmem_cache_t *filp_cache;

/* sysctl tunables... nr_files / nr_free_files start at 0; max_files is
 * seeded with NR_FILE. Counters are updated under files_lock below. */
struct files_stat_struct files_stat = {0, 0, NR_FILE};

/* Here the new files go */
static LIST_HEAD(anon_list);
/* And here the free ones sit */
static LIST_HEAD(free_list);
/* public *and* exported. Not pretty!
 * Protects anon_list, free_list, per-sb s_files chains and the
 * files_stat counters (taken via file_list_lock()/file_list_unlock()). */
spinlock_t files_lock = SPIN_LOCK_UNLOCKED;
28 void __init file_table_init(void)
30 filp_cache = kmem_cache_create("filp", sizeof(struct file),
32 SLAB_HWCACHE_ALIGN, NULL, NULL);
33 if(!filp_cache)
34 panic("VFS: Cannot alloc filp SLAB cache.");
36 * We could allocate the reserved files here, but really
37 * shouldn't need to: the normal boot process will create
38 * plenty of free files.
42 /* Find an unused file structure and return a pointer to it.
43 * Returns NULL, if there are no more free file structures or
44 * we run out of memory.
46 * SMP-safe.
48 struct file * get_empty_filp(void)
50 static int old_max = 0;
51 struct file * f;
53 file_list_lock();
54 if (files_stat.nr_free_files > NR_RESERVED_FILES) {
55 used_one:
56 f = list_entry(free_list.next, struct file, f_list);
57 list_del(&f->f_list);
58 files_stat.nr_free_files--;
59 new_one:
60 file_list_unlock();
61 memset(f, 0, sizeof(*f));
62 atomic_set(&f->f_count,1);
63 f->f_version = ++event;
64 f->f_uid = current->fsuid;
65 f->f_gid = current->fsgid;
66 file_list_lock();
67 list_add(&f->f_list, &anon_list);
68 file_list_unlock();
69 return f;
72 * Use a reserved one if we're the superuser
74 if (files_stat.nr_free_files && !current->euid)
75 goto used_one;
77 * Allocate a new one if we're below the limit.
79 if (files_stat.nr_files < files_stat.max_files) {
80 file_list_unlock();
81 f = kmem_cache_alloc(filp_cache, SLAB_KERNEL);
82 file_list_lock();
83 if (f) {
84 files_stat.nr_files++;
85 goto new_one;
87 /* Big problems... */
88 printk("VFS: filp allocation failed\n");
90 } else if (files_stat.max_files > old_max) {
91 printk("VFS: file-max limit %d reached\n", files_stat.max_files);
92 old_max = files_stat.max_files;
94 file_list_unlock();
95 return NULL;
99 * Clear and initialize a (private) struct file for the given dentry,
100 * and call the open function (if any). The caller must verify that
101 * inode->i_fop is not NULL. The only user is nfsfh.c and this function
102 * will eventually go away.
104 int init_private_file(struct file *filp, struct dentry *dentry, int mode)
106 memset(filp, 0, sizeof(*filp));
107 filp->f_mode = mode;
108 atomic_set(&filp->f_count, 1);
109 filp->f_dentry = dentry;
110 filp->f_uid = current->fsuid;
111 filp->f_gid = current->fsgid;
112 filp->f_op = dentry->d_inode->i_fop;
113 if (filp->f_op->open)
114 return filp->f_op->open(dentry->d_inode, filp);
115 else
116 return 0;
120 * Called when retiring the last use of a file pointer.
122 static void __fput(struct file *filp)
124 struct dentry * dentry = filp->f_dentry;
125 struct vfsmount * mnt = filp->f_vfsmnt;
126 struct inode * inode = dentry->d_inode;
128 if (filp->f_op && filp->f_op->release)
129 filp->f_op->release(inode, filp);
130 fops_put(filp->f_op);
131 filp->f_dentry = NULL;
132 filp->f_vfsmnt = NULL;
133 if (filp->f_mode & FMODE_WRITE)
134 put_write_access(inode);
135 dput(dentry);
136 if (mnt)
137 mntput(mnt);
140 void _fput(struct file *file)
142 lock_kernel();
143 locks_remove_flock(file); /* Still need the */
144 __fput(file); /* big lock here. */
145 unlock_kernel();
147 file_list_lock();
148 list_del(&file->f_list);
149 list_add(&file->f_list, &free_list);
150 files_stat.nr_free_files++;
151 file_list_unlock();
154 /* Here. put_filp() is SMP-safe now. */
156 void put_filp(struct file *file)
158 if(atomic_dec_and_test(&file->f_count)) {
159 file_list_lock();
160 list_del(&file->f_list);
161 list_add(&file->f_list, &free_list);
162 files_stat.nr_free_files++;
163 file_list_unlock();
167 void file_move(struct file *file, struct list_head *list)
169 if (!list)
170 return;
171 file_list_lock();
172 list_del(&file->f_list);
173 list_add(&file->f_list, list);
174 file_list_unlock();
177 void file_moveto(struct file *new, struct file *old)
179 file_list_lock();
180 list_del(&new->f_list);
181 list_add(&new->f_list, &old->f_list);
182 file_list_unlock();
185 int fs_may_remount_ro(struct super_block *sb)
187 struct list_head *p;
189 /* Check that no files are currently opened for writing. */
190 file_list_lock();
191 for (p = sb->s_files.next; p != &sb->s_files; p = p->next) {
192 struct file *file = list_entry(p, struct file, f_list);
193 struct inode *inode = file->f_dentry->d_inode;
194 if (!inode)
195 continue;
197 /* File with pending delete? */
198 if (inode->i_nlink == 0)
199 goto too_bad;
201 /* Writable file? */
202 if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
203 goto too_bad;
205 file_list_unlock();
206 return 1; /* Tis' cool bro. */
207 too_bad:
208 file_list_unlock();
209 return 0;