[linux-2.6.git] / fs / file_table.c
blob a305d9e2d1b2aac05dcd456bdd23885652272439
/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/ima.h>

#include <linux/atomic.h>

#include "internal.h"
/* sysctl tunables... */
struct files_stat_struct files_stat = {
        .max_files = NR_FILE
};

DEFINE_LGLOCK(files_lglock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;
static inline void file_free_rcu(struct rcu_head *head)
{
        struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

        put_cred(f->f_cred);
        kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
        percpu_counter_dec(&nr_files);
        file_check_state(f);
        call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}
/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
        return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
        return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);
/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        files_stat.nr_files = get_nr_files();
        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return -ENOSYS;
}
#endif
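
/*
 * Illustrative note (not part of the original file): this handler is what
 * backs the fs.file-nr sysctl, assuming the usual registration of
 * proc_nr_files against &files_stat in kernel/sysctl.c.  From userspace:
 *
 *	$ cat /proc/sys/fs/file-nr
 *	1824	0	205076
 *
 * i.e. nr_files (refreshed from the percpu counter above), the always-zero
 * nr_free_files, and max_files.  The numbers shown are only an example.
 */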
/* Find an unused file structure and return a pointer to it.
 * Returns NULL, if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
        const struct cred *cred = current_cred();
        static long old_max;
        struct file *f;

        /*
         * Privileged users can go above max_files
         */
        if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
                /*
                 * percpu_counters are inaccurate.  Do an expensive check before
                 * we go and fail.
                 */
                if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
                        goto over;
        }

        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
        if (f == NULL)
                goto fail;

        percpu_counter_inc(&nr_files);
        f->f_cred = get_cred(cred);
        if (security_file_alloc(f))
                goto fail_sec;

        INIT_LIST_HEAD(&f->f_u.fu_list);
        atomic_long_set(&f->f_count, 1);
        rwlock_init(&f->f_owner.lock);
        spin_lock_init(&f->f_lock);
        eventpoll_init_file(f);
        /* f->f_version: 0 */
        return f;

over:
        /* Ran out of filps - report that */
        if (get_nr_files() > old_max) {
                pr_info("VFS: file-max limit %lu reached\n", get_max_files());
                old_max = get_nr_files();
        }
        goto fail;

fail_sec:
        file_free(f);
fail:
        return NULL;
}
/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the (vfsmount, dentry) pair the new file will reside on
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file'.  Do so because of the same initialization
 * pitfalls listed for init_file().  This is a
 * preferred interface to using init_file().
 *
 * If all the callers of init_file() are eliminated, its
 * code should be moved into this function.
 */
struct file *alloc_file(struct path *path, fmode_t mode,
                const struct file_operations *fop)
{
        struct file *file;

        file = get_empty_filp();
        if (!file)
                return NULL;

        file->f_path = *path;
        file->f_mapping = path->dentry->d_inode->i_mapping;
        file->f_mode = mode;
        file->f_op = fop;

        /*
         * These mounts don't really matter in practice
         * for r/o bind mounts.  They aren't userspace-
         * visible.  We do this for consistency, and so
         * that we can do debugging checks at __fput()
         */
        if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
                file_take_write(file);
                WARN_ON(mnt_clone_write(path->mnt));
        }
        if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_inc(path->dentry->d_inode);
        return file;
}
EXPORT_SYMBOL(alloc_file);
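
/*
 * Illustrative sketch (not part of the original file): how a caller that
 * already holds references to a vfsmount and a dentry might use
 * alloc_file().  The names example_create_file() and example_fops are
 * hypothetical.  alloc_file() takes over the path references on success;
 * on failure the caller still owns them and must drop them.  A writable
 * open (FMODE_WRITE) would additionally require write access to the mount,
 * as noted above get_empty_filp().
 */
static const struct file_operations example_fops;	/* hypothetical, empty ops */

static struct file *example_create_file(struct vfsmount *mnt,
                                        struct dentry *dentry)
{
        struct path path;
        struct file *file;

        path.mnt = mntget(mnt);
        path.dentry = dget(dentry);

        file = alloc_file(&path, FMODE_READ, &example_fops);
        if (!file)
                path_put(&path);	/* drop the mnt and dentry references */
        return file;
}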
/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
static void drop_file_write_access(struct file *file)
{
        struct vfsmount *mnt = file->f_path.mnt;
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;

        put_write_access(inode);

        if (special_file(inode->i_mode))
                return;
        if (file_check_writeable(file) != 0)
                return;
        mnt_drop_write(mnt);
        file_release_write(file);
}
/* the real guts of fput() - releasing the last reference to file
 */
static void __fput(struct file *file)
{
        struct dentry *dentry = file->f_path.dentry;
        struct vfsmount *mnt = file->f_path.mnt;
        struct inode *inode = dentry->d_inode;

        might_sleep();

        fsnotify_close(file);
        /*
         * The function eventpoll_release() should be the first called
         * in the file cleanup chain.
         */
        eventpoll_release(file);
        locks_remove_flock(file);

        if (unlikely(file->f_flags & FASYNC)) {
                if (file->f_op && file->f_op->fasync)
                        file->f_op->fasync(-1, file, 0);
        }
        if (file->f_op && file->f_op->release)
                file->f_op->release(inode, file);
        security_file_free(file);
        ima_file_free(file);
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
                     !(file->f_mode & FMODE_PATH))) {
                cdev_put(inode->i_cdev);
        }
        fops_put(file->f_op);
        put_pid(file->f_owner.pid);
        file_sb_list_del(file);
        if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_dec(inode);
        if (file->f_mode & FMODE_WRITE)
                drop_file_write_access(file);
        file->f_path.dentry = NULL;
        file->f_path.mnt = NULL;
        file_free(file);
        dput(dentry);
        mntput(mnt);
}
void fput(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count))
                __fput(file);
}

EXPORT_SYMBOL(fput);
struct file *fget(unsigned int fd)
{
        struct file *file;
        struct files_struct *files = current->files;

        rcu_read_lock();
        file = fcheck_files(files, fd);
        if (file) {
                /* File object ref couldn't be taken */
                if (file->f_mode & FMODE_PATH ||
                    !atomic_long_inc_not_zero(&file->f_count))
                        file = NULL;
        }
        rcu_read_unlock();

        return file;
}

EXPORT_SYMBOL(fget);
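
/*
 * Illustrative sketch (not part of the original file): the canonical
 * fget()/fput() pairing around a file descriptor.  The helper name
 * example_fd_is_regular() is hypothetical.
 */
static bool example_fd_is_regular(unsigned int fd)
{
        struct file *file = fget(fd);
        bool ret = false;

        if (file) {
                ret = S_ISREG(file->f_path.dentry->d_inode->i_mode);
                fput(file);		/* drop the reference taken by fget() */
        }
        return ret;
}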
struct file *fget_raw(unsigned int fd)
{
        struct file *file;
        struct files_struct *files = current->files;

        rcu_read_lock();
        file = fcheck_files(files, fd);
        if (file) {
                /* File object ref couldn't be taken */
                if (!atomic_long_inc_not_zero(&file->f_count))
                        file = NULL;
        }
        rcu_read_unlock();

        return file;
}

EXPORT_SYMBOL(fget_raw);
/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
struct file *fget_light(unsigned int fd, int *fput_needed)
{
        struct file *file;
        struct files_struct *files = current->files;

        *fput_needed = 0;
        if (atomic_read(&files->count) == 1) {
                file = fcheck_files(files, fd);
                if (file && (file->f_mode & FMODE_PATH))
                        file = NULL;
        } else {
                rcu_read_lock();
                file = fcheck_files(files, fd);
                if (file) {
                        if (!(file->f_mode & FMODE_PATH) &&
                            atomic_long_inc_not_zero(&file->f_count))
                                *fput_needed = 1;
                        else
                                /* Didn't get the reference, someone's freed */
                                file = NULL;
                }
                rcu_read_unlock();
        }

        return file;
}
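
/*
 * Illustrative sketch (not part of the original file): the usual
 * fget_light()/fput_light() pattern inside a syscall-style function,
 * satisfying the conditions listed above fget_light().  The helper name
 * example_get_fd_flags() is hypothetical.
 */
static long example_get_fd_flags(unsigned int fd)
{
        struct file *file;
        int fput_needed;
        long ret = -EBADF;

        file = fget_light(fd, &fput_needed);
        if (file) {
                ret = file->f_flags;		/* use the file only in this window */
                fput_light(file, fput_needed);	/* pass back fget_light's flag */
        }
        return ret;
}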
struct file *fget_raw_light(unsigned int fd, int *fput_needed)
{
        struct file *file;
        struct files_struct *files = current->files;

        *fput_needed = 0;
        if (atomic_read(&files->count) == 1) {
                file = fcheck_files(files, fd);
        } else {
                rcu_read_lock();
                file = fcheck_files(files, fd);
                if (file) {
                        if (atomic_long_inc_not_zero(&file->f_count))
                                *fput_needed = 1;
                        else
                                /* Didn't get the reference, someone's freed */
                                file = NULL;
                }
                rcu_read_unlock();
        }

        return file;
}
void put_filp(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                security_file_free(file);
                file_sb_list_del(file);
                file_free(file);
        }
}
static inline int file_list_cpu(struct file *file)
{
#ifdef CONFIG_SMP
        return file->f_sb_list_cpu;
#else
        return smp_processor_id();
#endif
}
/* helper for file_sb_list_add to reduce ifdefs */
static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
{
        struct list_head *list;
#ifdef CONFIG_SMP
        int cpu;
        cpu = smp_processor_id();
        file->f_sb_list_cpu = cpu;
        list = per_cpu_ptr(sb->s_files, cpu);
#else
        list = &sb->s_files;
#endif
        list_add(&file->f_u.fu_list, list);
}
/**
 * file_sb_list_add - add a file to the sb's file list
 * @file: file to add
 * @sb: sb to add it to
 *
 * Use this function to associate a file with the superblock of the inode it
 * refers to.
 */
void file_sb_list_add(struct file *file, struct super_block *sb)
{
        lg_local_lock(&files_lglock);
        __file_sb_list_add(file, sb);
        lg_local_unlock(&files_lglock);
}
/**
 * file_sb_list_del - remove a file from the sb's file list
 * @file: file to remove
 *
 * Use this function to remove a file from its superblock.
 */
void file_sb_list_del(struct file *file)
{
        if (!list_empty(&file->f_u.fu_list)) {
                lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
                list_del_init(&file->f_u.fu_list);
                lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
        }
}
#ifdef CONFIG_SMP

/*
 * These macros iterate all files on all CPUs for a given superblock.
 * files_lglock must be held globally.
 */
#define do_file_list_for_each_entry(__sb, __file)		\
{								\
        int i;							\
        for_each_possible_cpu(i) {				\
                struct list_head *list;				\
                list = per_cpu_ptr((__sb)->s_files, i);		\
                list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
        }							\
}

#else

#define do_file_list_for_each_entry(__sb, __file)		\
{								\
        struct list_head *list;					\
        list = &(sb)->s_files;					\
        list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
}

#endif
/**
 * mark_files_ro - mark all files read-only
 * @sb: superblock in question
 *
 * All files are marked read-only.  We don't care about pending
 * delete files so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
        struct file *f;

retry:
        lg_global_lock(&files_lglock);
        do_file_list_for_each_entry(sb, f) {
                struct vfsmount *mnt;
                if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
                        continue;
                if (!file_count(f))
                        continue;
                if (!(f->f_mode & FMODE_WRITE))
                        continue;
                spin_lock(&f->f_lock);
                f->f_mode &= ~FMODE_WRITE;
                spin_unlock(&f->f_lock);
                if (file_check_writeable(f) != 0)
                        continue;
                file_release_write(f);
                mnt = mntget(f->f_path.mnt);
                /* This can sleep, so we can't hold the spinlock. */
                lg_global_unlock(&files_lglock);
                mnt_drop_write(mnt);
                mntput(mnt);
                goto retry;
        } while_file_list_for_each_entry;
        lg_global_unlock(&files_lglock);
}
void __init files_init(unsigned long mempages)
{
        unsigned long n;

        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

        /*
         * One file with associated inode and dcache is very roughly 1K.
         * Per default don't use more than 10% of our memory for files.
         */

        n = (mempages * (PAGE_SIZE / 1024)) / 10;
        files_stat.max_files = max_t(unsigned long, n, NR_FILE);
        files_defer_init();
        lg_lock_init(&files_lglock, "files_lglock");
        percpu_counter_init(&nr_files, 0);
}
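
/*
 * Illustrative arithmetic for the sizing above (not part of the original
 * file), assuming 4 KiB pages and 4 GiB of RAM: mempages = 1,048,576, so
 * n = 1,048,576 * (4096 / 1024) / 10 = 419,430 and max_files becomes
 * max(419430, NR_FILE).  At roughly 1K per open file (file + inode +
 * dentry), that keeps the default ceiling near 10% of memory.
 */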