/*
 *  linux/fs/proc/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/completion.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/sysctl.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "internal.h"
struct proc_dir_entry *de_get(struct proc_dir_entry *de)
{
	atomic_inc(&de->count);
	return de;
}
/*
 * Decrements the use count and checks for deferred deletion.
 */
void de_put(struct proc_dir_entry *de)
{
	lock_kernel();
	if (!atomic_read(&de->count)) {
		printk("de_put: entry %s already free!\n", de->name);
		unlock_kernel();
		return;
	}

	if (atomic_dec_and_test(&de->count))
		free_proc_entry(de);
	unlock_kernel();
}
/*
 * Decrement the use count of the proc_dir_entry.
 */
static void proc_delete_inode(struct inode *inode)
{
	struct proc_dir_entry *de;

	truncate_inode_pages(&inode->i_data, 0);

	/* Stop tracking associated processes */
	put_pid(PROC_I(inode)->pid);

	/* Let go of any associated proc directory entry */
	de = PROC_I(inode)->pde;
	if (de) {
		if (de->owner)
			module_put(de->owner);
		de_put(de);
	}
	if (PROC_I(inode)->sysctl)
		sysctl_head_put(PROC_I(inode)->sysctl);
	clear_inode(inode);
}
struct vfsmount *proc_mnt;

static struct kmem_cache * proc_inode_cachep;
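
/*
 * proc inodes live in a dedicated slab cache.  The VFS only ever sees the
 * embedded vfs_inode; PROC_I() converts back to the containing proc_inode.
 */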
static struct inode *proc_alloc_inode(struct super_block *sb)
{
	struct proc_inode *ei;
	struct inode *inode;

	ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	ei->pid = NULL;
	ei->fd = 0;
	ei->op.proc_get_link = NULL;
	ei->pde = NULL;
	ei->sysctl = NULL;
	ei->sysctl_entry = NULL;
	inode = &ei->vfs_inode;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	return inode;
}
static void proc_destroy_inode(struct inode *inode)
{
	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
}
static void init_once(void *foo)
{
	struct proc_inode *ei = (struct proc_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}
void __init proc_init_inodecache(void)
{
	proc_inode_cachep = kmem_cache_create("proc_inode_cache",
					     sizeof(struct proc_inode),
					     0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD|SLAB_PANIC),
					     init_once);
}
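
/*
 * Superblock operations for procfs.  generic_delete_inode as ->drop_inode
 * means a proc inode is deleted as soon as its last reference goes away,
 * which in turn invokes proc_delete_inode() above.
 */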
static const struct super_operations proc_sops = {
	.alloc_inode	= proc_alloc_inode,
	.destroy_inode	= proc_destroy_inode,
	.drop_inode	= generic_delete_inode,
	.delete_inode	= proc_delete_inode,
	.statfs		= simple_statfs,
};
/* Must be called with pde->pde_unload_lock held */
static void __pde_users_dec(struct proc_dir_entry *pde)
{
	pde->pde_users--;
	if (pde->pde_unload_completion && pde->pde_users == 0)
		complete(pde->pde_unload_completion);
}
static void pde_users_dec(struct proc_dir_entry *pde)
{
	spin_lock(&pde->pde_unload_lock);
	__pde_users_dec(pde);
	spin_unlock(&pde->pde_unload_lock);
}
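
/*
 * The proc_reg_* handlers below wrap the file_operations supplied by the
 * owner of a /proc entry.  Each wrapper takes pde_unload_lock, bails out if
 * ->proc_fops has already been cleared by remove_proc_entry(), bumps
 * ->pde_users so remove_proc_entry() waits for the call in progress,
 * snapshots the method pointer, drops the lock, calls the method, and
 * finally drops ->pde_users again via pde_users_dec().
 */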
static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	loff_t rv = -EINVAL;
	loff_t (*llseek)(struct file *, loff_t, int);

	spin_lock(&pde->pde_unload_lock);
	/*
	 * remove_proc_entry() is going to delete PDE (as part of module
	 * cleanup sequence). No new callers into module allowed.
	 */
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	/*
	 * Bump refcount so that remove_proc_entry will wait for ->llseek to
	 * complete.
	 */
	pde->pde_users++;
	/*
	 * Save function pointer under lock, to protect against ->proc_fops
	 * NULL'ifying right after ->pde_unload_lock is dropped.
	 */
	llseek = pde->proc_fops->llseek;
	spin_unlock(&pde->pde_unload_lock);

	if (!llseek)
		llseek = default_llseek;
	rv = llseek(file, offset, whence);

	pde_users_dec(pde);
	return rv;
}
static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	ssize_t rv = -EIO;
	ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	read = pde->proc_fops->read;
	spin_unlock(&pde->pde_unload_lock);

	if (read)
		rv = read(file, buf, count, ppos);

	pde_users_dec(pde);
	return rv;
}
static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	ssize_t rv = -EIO;
	ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	write = pde->proc_fops->write;
	spin_unlock(&pde->pde_unload_lock);

	if (write)
		rv = write(file, buf, count, ppos);

	pde_users_dec(pde);
	return rv;
}
static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	unsigned int rv = DEFAULT_POLLMASK;
	unsigned int (*poll)(struct file *, struct poll_table_struct *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	poll = pde->proc_fops->poll;
	spin_unlock(&pde->pde_unload_lock);

	if (poll)
		rv = poll(file, pts);

	pde_users_dec(pde);
	return rv;
}
static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	long rv = -ENOTTY;
	long (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);
	int (*ioctl)(struct inode *, struct file *, unsigned int, unsigned long);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	unlocked_ioctl = pde->proc_fops->unlocked_ioctl;
	ioctl = pde->proc_fops->ioctl;
	spin_unlock(&pde->pde_unload_lock);

	if (unlocked_ioctl) {
		rv = unlocked_ioctl(file, cmd, arg);
		if (rv == -ENOIOCTLCMD)
			rv = -EINVAL;
	} else if (ioctl) {
		lock_kernel();
		rv = ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
		unlock_kernel();
	}

	pde_users_dec(pde);
	return rv;
}
#ifdef CONFIG_COMPAT
static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	long rv = -ENOTTY;
	long (*compat_ioctl)(struct file *, unsigned int, unsigned long);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	compat_ioctl = pde->proc_fops->compat_ioctl;
	spin_unlock(&pde->pde_unload_lock);

	if (compat_ioctl)
		rv = compat_ioctl(file, cmd, arg);

	pde_users_dec(pde);
	return rv;
}
#endif
static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	int rv = -EIO;
	int (*mmap)(struct file *, struct vm_area_struct *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	mmap = pde->proc_fops->mmap;
	spin_unlock(&pde->pde_unload_lock);

	if (mmap)
		rv = mmap(file, vma);

	pde_users_dec(pde);
	return rv;
}
static int proc_reg_open(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	int (*open)(struct inode *, struct file *);
	int (*release)(struct inode *, struct file *);
	struct pde_opener *pdeo;

	/*
	 * What for, you ask? Well, we can have open, rmmod, remove_proc_entry
	 * sequence. ->release won't be called because ->proc_fops will be
	 * cleared. Depending on complexity of ->release, consequences vary.
	 *
	 * We can't wait for mercy when close will be done for real, it's
	 * deadlockable: rmmod foo </proc/foo . So, we're going to do ->release
	 * by hand in remove_proc_entry(). For this, save opener's credentials
	 * for later.
	 */
	pdeo = kmalloc(sizeof(struct pde_opener), GFP_KERNEL);
	if (!pdeo)
		return -ENOMEM;

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		kfree(pdeo);
		return -EINVAL;
	}
	pde->pde_users++;
	open = pde->proc_fops->open;
	release = pde->proc_fops->release;
	spin_unlock(&pde->pde_unload_lock);

	if (open)
		rv = open(inode, file);

	spin_lock(&pde->pde_unload_lock);
	if (rv == 0 && release) {
		/* To know what to release. */
		pdeo->inode = inode;
		pdeo->file = file;
		/* Strictly for "too late" ->release in proc_reg_release(). */
		pdeo->release = release;
		list_add(&pdeo->lh, &pde->pde_openers);
	} else
		kfree(pdeo);
	__pde_users_dec(pde);
	spin_unlock(&pde->pde_unload_lock);
	return rv;
}
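
/*
 * Find the pde_opener that proc_reg_open() queued for this inode/file pair.
 * Caller holds pde->pde_unload_lock.
 */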
static struct pde_opener *find_pde_opener(struct proc_dir_entry *pde,
					struct inode *inode, struct file *file)
{
	struct pde_opener *pdeo;

	list_for_each_entry(pdeo, &pde->pde_openers, lh) {
		if (pdeo->inode == inode && pdeo->file == file)
			return pdeo;
	}
	return NULL;
}
static int proc_reg_release(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	int (*release)(struct inode *, struct file *);
	struct pde_opener *pdeo;

	spin_lock(&pde->pde_unload_lock);
	pdeo = find_pde_opener(pde, inode, file);
	if (!pde->proc_fops) {
		/*
		 * Can't simply exit, __fput() will think that everything is OK,
		 * and move on to freeing struct file. remove_proc_entry() will
		 * find slacker in opener's list and will try to do non-trivial
		 * things with struct file. Therefore, remove opener from list.
		 *
		 * But if opener is removed from list, who will ->release it?
		 */
		if (pdeo) {
			list_del(&pdeo->lh);
			spin_unlock(&pde->pde_unload_lock);
			rv = pdeo->release(inode, file);
			kfree(pdeo);
		} else
			spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	release = pde->proc_fops->release;
	if (pdeo) {
		list_del(&pdeo->lh);
		kfree(pdeo);
	}
	spin_unlock(&pde->pde_unload_lock);

	if (release)
		rv = release(inode, file);

	pde_users_dec(pde);
	return rv;
}
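
/*
 * proc_get_inode() installs one of these operation tables as ->i_fop for
 * regular files that carry their own ->proc_fops, so module-supplied methods
 * are always entered through the pde_users accounting above.
 */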
static const struct file_operations proc_reg_file_ops = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= proc_reg_compat_ioctl,
#endif
	.mmap		= proc_reg_mmap,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
#ifdef CONFIG_COMPAT
static const struct file_operations proc_reg_file_ops_no_compat = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.mmap		= proc_reg_mmap,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
#endif
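
/*
 * Illustrative sketch (not part of this file) of how a module-provided
 * /proc file ends up behind the wrappers above; the "example_*" names are
 * hypothetical.  proc_create() stores example_fops in ->proc_fops, and
 * proc_get_inode() below then installs proc_reg_file_ops (or the no_compat
 * variant) as inode->i_fop, so each method call is routed through the
 * pde_users accounting before reaching example_fops:
 *
 *	static int example_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "hello\n");
 *		return 0;
 *	}
 *
 *	static int example_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, example_show, NULL);
 *	}
 *
 *	static const struct file_operations example_fops = {
 *		.owner	 = THIS_MODULE,
 *		.open	 = example_open,
 *		.read	 = seq_read,
 *		.llseek	 = seq_lseek,
 *		.release = single_release,
 *	};
 *
 *	proc_create("example", 0, NULL, &example_fops);
 */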
struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
				struct proc_dir_entry *de)
{
	struct inode * inode;

	if (!try_module_get(de->owner))
		goto out_mod;

	inode = iget_locked(sb, ino);
	if (!inode)
		goto out_ino;
	if (inode->i_state & I_NEW) {
		inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
		PROC_I(inode)->fd = 0;
		PROC_I(inode)->pde = de;
		if (de) {
			if (de->mode) {
				inode->i_mode = de->mode;
				inode->i_uid = de->uid;
				inode->i_gid = de->gid;
			}
			if (de->size)
				inode->i_size = de->size;
			if (de->nlink)
				inode->i_nlink = de->nlink;
			if (de->proc_iops)
				inode->i_op = de->proc_iops;
			if (de->proc_fops) {
				if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_COMPAT
					if (!de->proc_fops->compat_ioctl)
						inode->i_fop =
							&proc_reg_file_ops_no_compat;
					else
#endif
						inode->i_fop = &proc_reg_file_ops;
				} else {
					inode->i_fop = de->proc_fops;
				}
			}
		}
		unlock_new_inode(inode);
	} else
		module_put(de->owner);
	return inode;

out_ino:
	module_put(de->owner);
out_mod:
	return NULL;
}
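
/*
 * Set up the procfs superblock and hook the root directory entry (proc_root)
 * up as its root inode.
 */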
int proc_fill_super(struct super_block *s)
{
	struct inode * root_inode;

	s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
	s->s_blocksize = 1024;
	s->s_blocksize_bits = 10;
	s->s_magic = PROC_SUPER_MAGIC;
	s->s_op = &proc_sops;
	s->s_time_gran = 1;

	de_get(&proc_root);
	root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root);
	if (!root_inode)
		goto out_no_root;
	root_inode->i_uid = 0;
	root_inode->i_gid = 0;
	s->s_root = d_alloc_root(root_inode);
	if (!s->s_root)
		goto out_no_root;
	return 0;

out_no_root:
	printk("proc_read_super: get root inode failed\n");
	iput(root_inode);
	de_put(&proc_root);
	return -ENOMEM;
}