/*
 *  linux/fs/proc/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/completion.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/sysctl.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "internal.h"
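/* Take a reference on a proc_dir_entry; dropped again via de_put(). */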
struct proc_dir_entry *de_get(struct proc_dir_entry *de)
{
	atomic_inc(&de->count);
	return de;
}
/*
 * Decrements the use count and checks for deferred deletion.
 */
void de_put(struct proc_dir_entry *de)
{
	lock_kernel();
	if (!atomic_read(&de->count)) {
		printk("de_put: entry %s already free!\n", de->name);
		unlock_kernel();
		return;
	}

	if (atomic_dec_and_test(&de->count))
		free_proc_entry(de);
	unlock_kernel();
}
/*
 * Decrement the use count of the proc_dir_entry.
 */
static void proc_delete_inode(struct inode *inode)
{
	struct proc_dir_entry *de;

	truncate_inode_pages(&inode->i_data, 0);

	/* Stop tracking associated processes */
	put_pid(PROC_I(inode)->pid);

	/* Let go of any associated proc directory entry */
	de = PROC_I(inode)->pde;
	if (de) {
		if (de->owner)
			module_put(de->owner);
		de_put(de);
	}
	if (PROC_I(inode)->sysctl)
		sysctl_head_put(PROC_I(inode)->sysctl);
	clear_inode(inode);
}
struct vfsmount *proc_mnt;

static struct kmem_cache *proc_inode_cachep;
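/*
 * super_operations hook: carve a proc_inode (which embeds the VFS inode)
 * out of the dedicated slab cache and reset its /proc-specific fields.
 */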
static struct inode *proc_alloc_inode(struct super_block *sb)
{
	struct proc_inode *ei;
	struct inode *inode;

	ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	ei->pid = NULL;
	ei->fd = 0;
	ei->op.proc_get_link = NULL;
	ei->pde = NULL;
	ei->sysctl = NULL;
	ei->sysctl_entry = NULL;
	inode = &ei->vfs_inode;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	return inode;
}
static void proc_destroy_inode(struct inode *inode)
{
	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
}
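/* Slab constructor: initialise the embedded VFS inode once per slab object. */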
static void init_once(void *foo)
{
	struct proc_inode *ei = (struct proc_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}
int __init proc_init_inodecache(void)
{
	proc_inode_cachep = kmem_cache_create("proc_inode_cache",
					      sizeof(struct proc_inode),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD|SLAB_PANIC),
					      init_once);
	return 0;
}
static const struct super_operations proc_sops = {
	.alloc_inode	= proc_alloc_inode,
	.destroy_inode	= proc_destroy_inode,
	.drop_inode	= generic_delete_inode,
	.delete_inode	= proc_delete_inode,
	.statfs		= simple_statfs,
};
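/*
 * pde_users counts callers currently executing inside an entry's ->proc_fops
 * methods.  remove_proc_entry() waits on pde_unload_completion until the
 * count drops to zero before the owning module may disappear.
 */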
static void __pde_users_dec(struct proc_dir_entry *pde)
{
	pde->pde_users--;
	if (pde->pde_unload_completion && pde->pde_users == 0)
		complete(pde->pde_unload_completion);
}
static void pde_users_dec(struct proc_dir_entry *pde)
{
	spin_lock(&pde->pde_unload_lock);
	__pde_users_dec(pde);
	spin_unlock(&pde->pde_unload_lock);
}
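/*
 * Each proc_reg_* wrapper below brackets the call into the underlying
 * ->proc_fops method with pde_users accounting, so remove_proc_entry() can
 * wait out in-flight calls after it has cleared ->proc_fops.
 */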
static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	loff_t rv = -EINVAL;
	loff_t (*llseek)(struct file *, loff_t, int);

	spin_lock(&pde->pde_unload_lock);
	/*
	 * remove_proc_entry() is going to delete PDE (as part of module
	 * cleanup sequence). No new callers into module allowed.
	 */
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	/*
	 * Bump refcount so that remove_proc_entry will wait for ->llseek to
	 * complete.
	 */
	pde->pde_users++;
	/*
	 * Save function pointer under lock, to protect against ->proc_fops
	 * NULL'ifying right after ->pde_unload_lock is dropped.
	 */
	llseek = pde->proc_fops->llseek;
	spin_unlock(&pde->pde_unload_lock);

	if (!llseek)
		llseek = default_llseek;
	rv = llseek(file, offset, whence);

	pde_users_dec(pde);
	return rv;
}
static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	ssize_t rv = -EIO;
	ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	read = pde->proc_fops->read;
	spin_unlock(&pde->pde_unload_lock);

	if (read)
		rv = read(file, buf, count, ppos);

	pde_users_dec(pde);
	return rv;
}
static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	ssize_t rv = -EIO;
	ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	write = pde->proc_fops->write;
	spin_unlock(&pde->pde_unload_lock);

	if (write)
		rv = write(file, buf, count, ppos);

	pde_users_dec(pde);
	return rv;
}
static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	unsigned int rv = DEFAULT_POLLMASK;
	unsigned int (*poll)(struct file *, struct poll_table_struct *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	poll = pde->proc_fops->poll;
	spin_unlock(&pde->pde_unload_lock);

	if (poll)
		rv = poll(file, pts);

	pde_users_dec(pde);
	return rv;
}
static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	long rv = -ENOTTY;
	long (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);
	int (*ioctl)(struct inode *, struct file *, unsigned int, unsigned long);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	unlocked_ioctl = pde->proc_fops->unlocked_ioctl;
	ioctl = pde->proc_fops->ioctl;
	spin_unlock(&pde->pde_unload_lock);

	if (unlocked_ioctl) {
		rv = unlocked_ioctl(file, cmd, arg);
		if (rv == -ENOIOCTLCMD)
			rv = -EINVAL;
	} else if (ioctl) {
		lock_kernel();
		rv = ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
		unlock_kernel();
	}

	pde_users_dec(pde);
	return rv;
}
#ifdef CONFIG_COMPAT
static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	long rv = -ENOTTY;
	long (*compat_ioctl)(struct file *, unsigned int, unsigned long);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	compat_ioctl = pde->proc_fops->compat_ioctl;
	spin_unlock(&pde->pde_unload_lock);

	if (compat_ioctl)
		rv = compat_ioctl(file, cmd, arg);

	pde_users_dec(pde);
	return rv;
}
#endif
static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	int rv = -EIO;
	int (*mmap)(struct file *, struct vm_area_struct *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	mmap = pde->proc_fops->mmap;
	spin_unlock(&pde->pde_unload_lock);

	if (mmap)
		rv = mmap(file, vma);

	pde_users_dec(pde);
	return rv;
}
static int proc_reg_open(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	int (*open)(struct inode *, struct file *);
	int (*release)(struct inode *, struct file *);
	struct pde_opener *pdeo;

	/*
	 * What for, you ask? Well, we can have open, rmmod, remove_proc_entry
	 * sequence. ->release won't be called because ->proc_fops will be
	 * cleared. Depending on complexity of ->release, consequences vary.
	 *
	 * We can't wait for mercy when close will be done for real, it's
	 * deadlockable: rmmod foo </proc/foo . So, we're going to do ->release
	 * by hand in remove_proc_entry(). For this, save opener's credentials
	 * for later.
	 */
	pdeo = kmalloc(sizeof(struct pde_opener), GFP_KERNEL);
	if (!pdeo)
		return -ENOMEM;

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		kfree(pdeo);
		return rv;
	}
	pde->pde_users++;
	open = pde->proc_fops->open;
	release = pde->proc_fops->release;
	spin_unlock(&pde->pde_unload_lock);

	if (open)
		rv = open(inode, file);

	spin_lock(&pde->pde_unload_lock);
	if (rv == 0 && release) {
		/* To know what to release. */
		pdeo->inode = inode;
		pdeo->file = file;
		/* Strictly for "too late" ->release in proc_reg_release(). */
		pdeo->release = release;
		list_add(&pdeo->lh, &pde->pde_openers);
	} else
		kfree(pdeo);
	__pde_users_dec(pde);
	spin_unlock(&pde->pde_unload_lock);
	return rv;
}
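/* Find the pde_opener that proc_reg_open() recorded for this inode/file pair. */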
static struct pde_opener *find_pde_opener(struct proc_dir_entry *pde,
					struct inode *inode, struct file *file)
{
	struct pde_opener *pdeo;

	list_for_each_entry(pdeo, &pde->pde_openers, lh) {
		if (pdeo->inode == inode && pdeo->file == file)
			return pdeo;
	}
	return NULL;
}
static int proc_reg_release(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	int (*release)(struct inode *, struct file *);
	struct pde_opener *pdeo;

	spin_lock(&pde->pde_unload_lock);
	pdeo = find_pde_opener(pde, inode, file);
	if (!pde->proc_fops) {
		/*
		 * Can't simply exit, __fput() will think that everything is OK,
		 * and move on to freeing struct file. remove_proc_entry() will
		 * find slacker in opener's list and will try to do non-trivial
		 * things with struct file. Therefore, remove opener from list.
		 *
		 * But if opener is removed from list, who will ->release it?
		 */
		if (pdeo) {
			list_del(&pdeo->lh);
			spin_unlock(&pde->pde_unload_lock);
			rv = pdeo->release(inode, file);
			kfree(pdeo);
		} else
			spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	release = pde->proc_fops->release;
	if (pdeo) {
		list_del(&pdeo->lh);
		kfree(pdeo);
	}
	spin_unlock(&pde->pde_unload_lock);

	if (release)
		rv = release(inode, file);

	pde_users_dec(pde);
	return rv;
}
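/*
 * file_operations installed on regular /proc files; every method funnels
 * through the proc_reg_* wrappers above.
 */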
static const struct file_operations proc_reg_file_ops = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= proc_reg_compat_ioctl,
#endif
	.mmap		= proc_reg_mmap,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
#ifdef CONFIG_COMPAT
static const struct file_operations proc_reg_file_ops_no_compat = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.mmap		= proc_reg_mmap,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
#endif
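/*
 * Obtain the inode backing a proc_dir_entry, copying mode, ownership, size
 * and the appropriate inode/file operations from the entry when it is new.
 */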
struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
				struct proc_dir_entry *de)
{
	struct inode * inode;

	if (!try_module_get(de->owner))
		goto out_mod;

	inode = iget_locked(sb, ino);
	if (!inode)
		goto out_ino;
	if (inode->i_state & I_NEW) {
		inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
		PROC_I(inode)->fd = 0;
		PROC_I(inode)->pde = de;

		if (de->mode) {
			inode->i_mode = de->mode;
			inode->i_uid = de->uid;
			inode->i_gid = de->gid;
		}
		if (de->size)
			inode->i_size = de->size;
		if (de->nlink)
			inode->i_nlink = de->nlink;
		if (de->proc_iops)
			inode->i_op = de->proc_iops;
		if (de->proc_fops) {
			if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_COMPAT
				if (!de->proc_fops->compat_ioctl)
					inode->i_fop =
						&proc_reg_file_ops_no_compat;
				else
#endif
					inode->i_fop = &proc_reg_file_ops;
			} else {
				inode->i_fop = de->proc_fops;
			}
		}
		unlock_new_inode(inode);
	} else
		module_put(de->owner);
	return inode;

out_ino:
	module_put(de->owner);
out_mod:
	return NULL;
}
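/* Fill in the procfs superblock and allocate its root dentry at mount time. */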
int proc_fill_super(struct super_block *s)
{
	struct inode * root_inode;

	s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
	s->s_blocksize = 1024;
	s->s_blocksize_bits = 10;
	s->s_magic = PROC_SUPER_MAGIC;
	s->s_op = &proc_sops;
	s->s_time_gran = 1;

	de_get(&proc_root);
	root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root);
	if (!root_inode)
		goto out_no_root;
	root_inode->i_uid = 0;
	root_inode->i_gid = 0;
	s->s_root = d_alloc_root(root_inode);
	if (!s->s_root)
		goto out_no_root;
	return 0;

out_no_root:
	printk("proc_read_super: get root inode failed\n");
	if (root_inode)
		iput(root_inode);
	de_put(&proc_root);
	return -ENOMEM;
}