/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/smp_lock.h>

/* SLAB cache for filp's. */
static kmem_cache_t *filp_cache;

/* sysctl tunables... */
int nr_files = 0;		/* read only */
int nr_free_files = 0;		/* read only */
int max_files = NR_FILE;	/* tunable */

/* Here the new files go */
static LIST_HEAD(anon_list);
/* And here the free ones sit */
static LIST_HEAD(free_list);
/* public *and* exported. Not pretty! */
spinlock_t files_lock = SPIN_LOCK_UNLOCKED;
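
/*
 * Note (not in the original file): the file_list_lock()/file_list_unlock()
 * helpers used throughout this file are assumed to be thin wrappers around
 * the exported files_lock above, defined in <linux/fs.h> roughly as
 *
 *	#define file_list_lock()	spin_lock(&files_lock)
 *	#define file_list_unlock()	spin_unlock(&files_lock)
 */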

void __init file_table_init(void)
{
	filp_cache = kmem_cache_create("filp", sizeof(struct file),
				       0,
				       SLAB_HWCACHE_ALIGN, NULL, NULL);
	if(!filp_cache)
		panic("VFS: Cannot alloc filp SLAB cache.");
	/*
	 * We could allocate the reserved files here, but really
	 * shouldn't need to: the normal boot process will create
	 * plenty of free files.
	 */
}

/* Find an unused file structure and return a pointer to it.
 * Returns NULL, if there are no more free file structures or
 * we run out of memory.
 *
 * SMP-safe.
 */
struct file * get_empty_filp(void)
{
	static int old_max = 0;
	struct file * f;

	file_list_lock();
	if (nr_free_files > NR_RESERVED_FILES) {
	used_one:
		f = list_entry(free_list.next, struct file, f_list);
		list_del(&f->f_list);
		nr_free_files--;
	new_one:
		file_list_unlock();
		memset(f, 0, sizeof(*f));
		atomic_set(&f->f_count,1);
		f->f_version = ++event;
		f->f_uid = current->fsuid;
		f->f_gid = current->fsgid;
		file_list_lock();
		list_add(&f->f_list, &anon_list);
		file_list_unlock();
		return f;
	}
	/*
	 * Use a reserved one if we're the superuser
	 */
	if (nr_free_files && !current->euid)
		goto used_one;
	/*
	 * Allocate a new one if we're below the limit.
	 */
	if (nr_files < max_files) {
		file_list_unlock();
		f = kmem_cache_alloc(filp_cache, SLAB_KERNEL);
		file_list_lock();
		if (f) {
			nr_files++;
			goto new_one;
		}
		/* Big problems... */
		printk("VFS: filp allocation failed\n");
	} else if (max_files > old_max) {
		printk("VFS: file-max limit %d reached\n", max_files);
		old_max = max_files;
	}
	file_list_unlock();
	return NULL;
}
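
/*
 * Illustrative sketch, not part of the original file: the usual caller
 * pattern for get_empty_filp().  The function name and the -ENFILE return
 * value are hypothetical; real callers (the open path) attach a dentry and
 * f_op before the file becomes visible, and use put_filp() on error.
 */
#if 0
static int example_get_and_drop_filp(void)
{
	struct file *f = get_empty_filp();

	if (!f)
		return -ENFILE;		/* table full or allocation failed */

	/* ... fill in f->f_dentry, f->f_op, f->f_flags ... */

	put_filp(f);			/* undo: drop the only reference */
	return 0;
}
#endif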

/*
 * Clear and initialize a (private) struct file for the given dentry,
 * and call the open function (if any).  The caller must verify that
 * inode->i_op and inode->i_op->default_file_ops are not NULL.
 */
int init_private_file(struct file *filp, struct dentry *dentry, int mode)
{
	memset(filp, 0, sizeof(*filp));
	filp->f_mode = mode;
	atomic_set(&filp->f_count, 1);
	filp->f_dentry = dentry;
	filp->f_uid = current->fsuid;
	filp->f_gid = current->fsgid;
	filp->f_op = dentry->d_inode->i_op->default_file_ops;
	if (filp->f_op->open)
		return filp->f_op->open(dentry->d_inode, filp);
	else
		return 0;
}
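
/*
 * Illustrative sketch, not part of the original file: how a kernel-side
 * user with its own struct file might call init_private_file().  The
 * helper name is hypothetical; per the comment above, the caller must
 * already know that i_op and i_op->default_file_ops are non-NULL.
 */
#if 0
static int example_open_private(struct file *filp, struct dentry *dentry)
{
	int err;

	err = init_private_file(filp, dentry, FMODE_READ);
	if (err)
		return err;		/* ->open() refused the file */

	/* ... use filp->f_op->read() etc., then release ... */

	if (filp->f_op && filp->f_op->release)
		filp->f_op->release(dentry->d_inode, filp);
	return 0;
}
#endif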

/*
 * Called when retiring the last use of a file pointer.
 */
static void __fput(struct file *filp)
{
	struct dentry * dentry = filp->f_dentry;
	struct inode * inode = dentry->d_inode;

	if (filp->f_op && filp->f_op->release)
		filp->f_op->release(inode, filp);
	filp->f_dentry = NULL;
	if (filp->f_mode & FMODE_WRITE)
		put_write_access(inode);
	dput(dentry);
}

void _fput(struct file *file)
{
	lock_kernel();
	locks_remove_flock(file);	/* Still need the */
	__fput(file);			/*   big lock here. */
	unlock_kernel();

	file_list_lock();
	list_del(&file->f_list);
	list_add(&file->f_list, &free_list);
	nr_free_files++;
	file_list_unlock();
}
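
/*
 * For context, a sketch that is not part of this file: most code does not
 * call _fput() directly.  The fput() wrapper in <linux/file.h> is assumed
 * to drop the reference count and call _fput() only for the final
 * reference, along these lines:
 */
#if 0
static inline void fput(struct file *file)
{
	if (atomic_dec_and_test(&file->f_count))
		_fput(file);
}
#endif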

/* Here. put_filp() is SMP-safe now. */

void put_filp(struct file *file)
{
	if(atomic_dec_and_test(&file->f_count)) {
		file_list_lock();
		list_del(&file->f_list);
		list_add(&file->f_list, &free_list);
		nr_free_files++;
		file_list_unlock();
	}
}

void file_move(struct file *file, struct list_head *list)
{
	if (!list)
		return;
	file_list_lock();
	list_del(&file->f_list);
	list_add(&file->f_list, list);
	file_list_unlock();
}

void file_moveto(struct file *new, struct file *old)
{
	file_list_lock();
	list_del(&new->f_list);
	list_add(&new->f_list, &old->f_list);
	file_list_unlock();
}
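
/*
 * Illustrative sketch, not part of the original file: a file that has been
 * fully set up is normally moved off anon_list and onto its superblock's
 * s_files list (the list fs_may_remount_ro() below walks), roughly:
 */
#if 0
static void example_attach_to_sb(struct file *f, struct inode *inode)
{
	if (inode->i_sb)
		file_move(f, &inode->i_sb->s_files);
}
#endif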

int fs_may_remount_ro(struct super_block *sb)
{
	struct list_head *p;

	/* Check that no files are currently opened for writing. */
	file_list_lock();
	for (p = sb->s_files.next; p != &sb->s_files; p = p->next) {
		struct file *file = list_entry(p, struct file, f_list);
		struct inode *inode = file->f_dentry->d_inode;
		if (!inode)
			continue;

		/* File with pending delete? */
		if (inode->i_nlink == 0)
			goto too_bad;

		/* Writable file? */
		if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
			goto too_bad;
	}
	file_list_unlock();
	return 1; /* Tis' cool bro. */
too_bad:
	file_list_unlock();
	return 0;
}
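
/*
 * Sketch of the caller side, not part of the original file: a remount to
 * read-only (do_remount_sb() in fs/super.c is the real user) is expected
 * to be refused when fs_may_remount_ro() returns 0, roughly:
 */
#if 0
static int example_remount_ro(struct super_block *sb)
{
	if (!fs_may_remount_ro(sb))
		return -EBUSY;		/* writable or unlinked-but-open files */
	sb->s_flags |= MS_RDONLY;
	return 0;
}
#endif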