/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>

#include <asm/atomic.h>
/* sysctl tunables... */
struct files_stat_struct files_stat = {
        .max_files = NR_FILE
};

/* public. Not pretty! */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;
static inline void file_free_rcu(struct rcu_head *head)
{
        struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

        put_cred(f->f_cred);
        kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
        percpu_counter_dec(&nr_files);
        file_check_state(f);
        call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}
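/*
 * Illustrative note (added commentary, not part of the original file):
 * file_free() must not free the object immediately, because lockless
 * readers such as fget() below may still be dereferencing it under
 * rcu_read_lock().  Deferring the free via call_rcu() makes this reader
 * pattern safe:
 *
 *      rcu_read_lock();
 *      file = fcheck_files(files, fd);         // lockless fd-table lookup
 *      if (file && !atomic_long_inc_not_zero(&file->f_count))
 *              file = NULL;                    // raced with the final fput()
 *      rcu_read_unlock();
 *
 * atomic_long_inc_not_zero() fails once f_count has reached zero, so a
 * reader can never resurrect a file already queued for file_free_rcu().
 */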
/*
 * Return the total number of open files in the system
 */
static int get_nr_files(void)
{
        return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
int get_max_files(void)
{
        return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);
/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
        files_stat.nr_files = get_nr_files();
        return proc_dointvec(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return -ENOSYS;
}
#endif
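/*
 * Hedged sketch (added commentary): proc_nr_files() is wired up outside
 * this file (kernel/sysctl.c in mainline) through a ctl_table entry
 * roughly like the one below, exporting the counts as
 * /proc/sys/fs/file-nr:
 *
 *      {
 *              .procname       = "file-nr",
 *              .data           = &files_stat,
 *              .maxlen         = sizeof(files_stat),
 *              .mode           = 0444,
 *              .proc_handler   = &proc_nr_files,
 *      },
 *
 * Reading the proc file invokes proc_nr_files(), which refreshes
 * files_stat.nr_files from the percpu counter before proc_dointvec()
 * formats the struct's integers.
 */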
/* Find an unused file structure and return a pointer to it.
 * Returns NULL, if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
        const struct cred *cred = current_cred();
        static int old_max;
        struct file *f;

        /*
         * Privileged users can go above max_files
         */
        if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
                /*
                 * percpu_counters are inaccurate.  Do an expensive check before
                 * we go and fail.
                 */
                if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
                        goto over;
        }

        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
        if (f == NULL)
                goto fail;

        percpu_counter_inc(&nr_files);
        if (security_file_alloc(f))
                goto fail_sec;

        INIT_LIST_HEAD(&f->f_u.fu_list);
        atomic_long_set(&f->f_count, 1);
        rwlock_init(&f->f_owner.lock);
        f->f_cred = get_cred(cred);
        spin_lock_init(&f->f_lock);
        eventpoll_init_file(f);
        /* f->f_version: 0 */
        return f;

over:
        /* Ran out of filps - report that */
        if (get_nr_files() > old_max) {
                printk(KERN_INFO "VFS: file-max limit %d reached\n",
                                        get_max_files());
                old_max = get_nr_files();
        }
        goto fail;

fail_sec:
        file_free(f);
fail:
        return NULL;
}
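/*
 * Hedged usage sketch (hypothetical caller, not from this file): a bare
 * file from get_empty_filp() has no dentry or vfsmount attached yet, so
 * an error path must release it with put_filp(), never fput():
 *
 *      struct file *f = get_empty_filp();
 *      if (!f)
 *              return ERR_PTR(-ENFILE);
 *      ...
 *      put_filp(f);            // undo path while the file is still bare
 *      return ERR_PTR(-EIO);   // -EIO is a placeholder error here
 */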
/**
 * alloc_file - allocate and initialize a 'struct file'
 * @mnt: the vfsmount on which the file will reside
 * @dentry: the dentry representing the new file
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file'.  Do so because of the same initialization
 * pitfalls listed for init_file().  This is the
 * preferred interface to init_file().
 *
 * If all the callers of init_file() are eliminated, its
 * code should be moved into this function.
 */
struct file *alloc_file(struct vfsmount *mnt, struct dentry *dentry,
                fmode_t mode, const struct file_operations *fop)
{
        struct file *file;

        file = get_empty_filp();
        if (!file)
                return NULL;

        init_file(file, mnt, dentry, mode, fop);
        return file;
}
EXPORT_SYMBOL(alloc_file);
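/*
 * Hedged usage sketch (hypothetical, not from this file): an in-kernel
 * pseudo-file implementation that already holds dentry and vfsmount
 * references can create an open file in one step:
 *
 *      file = alloc_file(my_mnt, my_dentry, FMODE_READ | FMODE_WRITE,
 *                        &my_file_ops);        // all three are placeholders
 *      if (!file) {
 *              dput(my_dentry);        // alloc_file took nothing on failure
 *              return -ENFILE;
 *      }
 *
 * Note that alloc_file() mntget()s the vfsmount via init_file() but takes
 * no dentry reference of its own: the caller donates one, which __fput()
 * eventually drops with dput().
 */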
/**
 * init_file - initialize a 'struct file'
 * @file: the already allocated 'struct file' to be initialized
 * @mnt: the vfsmount on which the file resides
 * @dentry: the dentry representing this file
 * @mode: the mode the file is opened with
 * @fop: the 'struct file_operations' for this file
 *
 * Use this instead of setting the members directly.  Doing so
 * avoids making mistakes like forgetting the mntget() or
 * forgetting to take a write on the mnt.
 *
 * Note: This is a crappy interface.  It is here to make
 * merging with the existing users of get_empty_filp()
 * who have complex failure logic easier.  All users
 * of this should be moving to alloc_file().
 */
int init_file(struct file *file, struct vfsmount *mnt, struct dentry *dentry,
           fmode_t mode, const struct file_operations *fop)
{
        int error = 0;
        file->f_path.dentry = dentry;
        file->f_path.mnt = mntget(mnt);
        file->f_mapping = dentry->d_inode->i_mapping;
        file->f_mode = mode;
        file->f_op = fop;

        /*
         * These mounts don't really matter in practice
         * for r/o bind mounts.  They aren't userspace-
         * visible.  We do this for consistency, and so
         * that we can do debugging checks at __fput()
         */
        if ((mode & FMODE_WRITE) && !special_file(dentry->d_inode->i_mode)) {
                file_take_write(file);
                error = mnt_clone_write(mnt);
                WARN_ON(error);
        }
        return error;
}
EXPORT_SYMBOL(init_file);
void fput(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count))
                __fput(file);
}
EXPORT_SYMBOL(fput);
/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
void drop_file_write_access(struct file *file)
{
        struct vfsmount *mnt = file->f_path.mnt;
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;

        put_write_access(inode);

        if (special_file(inode->i_mode))
                return;
        if (file_check_writeable(file) != 0)
                return;
        mnt_drop_write(mnt);
        file_release_write(file);
}
EXPORT_SYMBOL_GPL(drop_file_write_access);
/* __fput is called from task context when aio completion releases the
 * last use of a struct file *.  Do not use otherwise.
 */
void __fput(struct file *file)
{
        struct dentry *dentry = file->f_path.dentry;
        struct vfsmount *mnt = file->f_path.mnt;
        struct inode *inode = dentry->d_inode;

        might_sleep();

        fsnotify_close(file);
        /*
         * The function eventpoll_release() should be the first called
         * in the file cleanup chain.
         */
        eventpoll_release(file);
        locks_remove_flock(file);

        if (unlikely(file->f_flags & FASYNC)) {
                if (file->f_op && file->f_op->fasync)
                        file->f_op->fasync(-1, file, 0);
        }
        if (file->f_op && file->f_op->release)
                file->f_op->release(inode, file);
        security_file_free(file);
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
                cdev_put(inode->i_cdev);
        fops_put(file->f_op);
        put_pid(file->f_owner.pid);
        file_kill(file);
        if (file->f_mode & FMODE_WRITE)
                drop_file_write_access(file);
        file->f_path.dentry = NULL;
        file->f_path.mnt = NULL;
        file_free(file);
        dput(dentry);
        mntput(mnt);
}
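/*
 * Design note (added commentary): the teardown order above matters.
 * eventpoll_release() must run first (see the comment in __fput()),
 * ->release() is the driver's last chance to touch the file, and
 * dput()/mntput() come only after f_path is cleared and file_free() is
 * queued, so the dentry and vfsmount outlive every use of the file.
 * FMODE_WRITE files also return their mount-write reference through
 * drop_file_write_access(), balancing the mnt_clone_write() taken in
 * init_file().
 */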
struct file *fget(unsigned int fd)
{
        struct file *file;
        struct files_struct *files = current->files;

        rcu_read_lock();
        file = fcheck_files(files, fd);
        if (file) {
                if (!atomic_long_inc_not_zero(&file->f_count)) {
                        /* File object ref couldn't be taken */
                        rcu_read_unlock();
                        return NULL;
                }
        }
        rcu_read_unlock();

        return file;
}
EXPORT_SYMBOL(fget);
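/*
 * Hedged usage sketch (hypothetical caller): the classic syscall pattern
 * pairs fget() with fput() once the fd has been resolved:
 *
 *      struct file *file = fget(fd);
 *      if (!file)
 *              return -EBADF;
 *      ret = do_something(file);       // do_something() is a placeholder
 *      fput(file);
 *      return ret;
 *
 * fget() returns NULL either when fd is not open or when it loses the
 * race against a concurrent final fput() (the
 * atomic_long_inc_not_zero() check above).
 */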
/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 * You can use this only if it is guaranteed that the current task already
 * holds a refcnt to that file. That check has to be done at fget() only
 * and a flag is returned to be passed to the corresponding fput_light().
 * There must not be a cloning between an fget_light/fput_light pair.
 */
struct file *fget_light(unsigned int fd, int *fput_needed)
{
        struct file *file;
        struct files_struct *files = current->files;

        *fput_needed = 0;
        if (likely((atomic_read(&files->count) == 1))) {
                file = fcheck_files(files, fd);
        } else {
                rcu_read_lock();
                file = fcheck_files(files, fd);
                if (file) {
                        if (atomic_long_inc_not_zero(&file->f_count))
                                *fput_needed = 1;
                        else
                                /* Didn't get the reference, someone's freed */
                                file = NULL;
                }
                rcu_read_unlock();
        }

        return file;
}
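/*
 * Hedged usage sketch (hypothetical caller): fget_light() must always be
 * paired with fput_light() carrying back the fput_needed flag, so the
 * reference is dropped only when one was actually taken:
 *
 *      int fput_needed;
 *      struct file *file = fget_light(fd, &fput_needed);
 *      if (!file)
 *              return -EBADF;
 *      ret = do_something(file);       // do_something() is a placeholder
 *      fput_light(file, fput_needed);
 *      return ret;
 */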
void put_filp(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                security_file_free(file);
                file_kill(file);
                file_free(file);
        }
}
void file_move(struct file *file, struct list_head *list)
{
        if (!list)
                return;
        file_list_lock();
        list_move(&file->f_u.fu_list, list);
        file_list_unlock();
}
void file_kill(struct file *file)
{
        if (!list_empty(&file->f_u.fu_list)) {
                file_list_lock();
                list_del_init(&file->f_u.fu_list);
                file_list_unlock();
        }
}
int fs_may_remount_ro(struct super_block *sb)
{
        struct file *file;

        /* Check that no files are currently opened for writing. */
        file_list_lock();
        list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
                struct inode *inode = file->f_path.dentry->d_inode;

                /* File with pending delete? */
                if (inode->i_nlink == 0)
                        goto too_bad;

                /* Writeable file? */
                if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
                        goto too_bad;
        }
        file_list_unlock();
        return 1; /* Tis' cool bro. */
too_bad:
        file_list_unlock();
        return 0;
}
/**
 * mark_files_ro - mark all files read-only
 * @sb: superblock in question
 *
 * All files are marked read-only.  We don't care about pending
 * delete files so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
        struct file *f;

retry:
        file_list_lock();
        list_for_each_entry(f, &sb->s_files, f_u.fu_list) {
                struct vfsmount *mnt;
                if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
                        continue;
                if (!file_count(f))
                        continue;
                if (!(f->f_mode & FMODE_WRITE))
                        continue;
                f->f_mode &= ~FMODE_WRITE;
                if (file_check_writeable(f) != 0)
                        continue;
                file_release_write(f);
                mnt = mntget(f->f_path.mnt);
                file_list_unlock();
                /*
                 * This can sleep, so we can't hold
                 * the file_list_lock() spinlock.
                 */
                mnt_drop_write(mnt);
                mntput(mnt);
                goto retry;
        }
        file_list_unlock();
}
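/*
 * Design note (added commentary): mark_files_ro() drops file_list_lock
 * before calling mnt_drop_write(), which may sleep, and then restarts the
 * whole walk from the "retry" label because the s_files list may have
 * changed while the spinlock was not held.  Pinning the vfsmount with
 * mntget() first keeps it alive across the unlocked window.
 */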
void __init files_init(unsigned long mempages)
{
        int n;

        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

        /*
         * One file with associated inode and dcache is very roughly 1K.
         * Per default don't use more than 10% of our memory for files.
         */

        n = (mempages * (PAGE_SIZE / 1024)) / 10;
        files_stat.max_files = n;
        if (files_stat.max_files < NR_FILE)
                files_stat.max_files = NR_FILE;
        files_defer_init();
        percpu_counter_init(&nr_files, 0);
}
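/*
 * Worked example (illustrative): with 4 KiB pages, a machine with 1 GiB
 * of RAM passes mempages = 262144, so
 *
 *      n = (262144 * (4096 / 1024)) / 10 = 104857
 *
 * i.e. the default file-max is roughly 100k files per GiB of memory,
 * matching the "one file costs roughly 1K" estimate in the comment above;
 * NR_FILE acts as the floor on small machines.
 */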