/*
 *  linux/fs/proc/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/pid_namespace.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/completion.h>
#include <linux/poll.h>
#include <linux/printk.h>
#include <linux/file.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/magic.h>

#include <asm/uaccess.h>

#include "internal.h"
static void proc_evict_inode(struct inode *inode)
{
        struct proc_dir_entry *de;
        struct ctl_table_header *head;
        const struct proc_ns_operations *ns_ops;
        void *ns;

        truncate_inode_pages(&inode->i_data, 0);
        clear_inode(inode);

        /* Stop tracking associated processes */
        put_pid(PROC_I(inode)->pid);

        /* Let go of any associated proc directory entry */
        de = PROC_I(inode)->pde;
        if (de)
                pde_put(de);
        head = PROC_I(inode)->sysctl;
        if (head) {
                rcu_assign_pointer(PROC_I(inode)->sysctl, NULL);
                sysctl_head_put(head);
        }
        /* Release any associated namespace */
        ns_ops = PROC_I(inode)->ns.ns_ops;
        ns = PROC_I(inode)->ns.ns;
        if (ns_ops && ns)
                ns_ops->put(ns);
}
static struct kmem_cache * proc_inode_cachep;
static struct inode *proc_alloc_inode(struct super_block *sb)
{
        struct proc_inode *ei;
        struct inode *inode;

        ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        ei->pid = NULL;
        ei->fd = 0;
        ei->op.proc_get_link = NULL;
        ei->pde = NULL;
        ei->sysctl = NULL;
        ei->sysctl_entry = NULL;
        ei->ns.ns = NULL;
        ei->ns.ns_ops = NULL;
        inode = &ei->vfs_inode;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
        return inode;
}
static void proc_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(proc_inode_cachep, PROC_I(inode));
}
static void proc_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, proc_i_callback);
}
static void init_once(void *foo)
{
        struct proc_inode *ei = (struct proc_inode *) foo;

        inode_init_once(&ei->vfs_inode);
}
void __init proc_init_inodecache(void)
{
        proc_inode_cachep = kmem_cache_create("proc_inode_cache",
                                             sizeof(struct proc_inode),
                                             0, (SLAB_RECLAIM_ACCOUNT|
                                                SLAB_MEM_SPREAD|SLAB_PANIC),
                                             init_once);
}
static int proc_show_options(struct seq_file *seq, struct dentry *root)
{
        struct super_block *sb = root->d_sb;
        struct pid_namespace *pid = sb->s_fs_info;

        if (!gid_eq(pid->pid_gid, GLOBAL_ROOT_GID))
                seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, pid->pid_gid));
        if (pid->hide_pid != 0)
                seq_printf(seq, ",hidepid=%u", pid->hide_pid);

        return 0;
}
static const struct super_operations proc_sops = {
        .alloc_inode    = proc_alloc_inode,
        .destroy_inode  = proc_destroy_inode,
        .drop_inode     = generic_delete_inode,
        .evict_inode    = proc_evict_inode,
        .statfs         = simple_statfs,
        .remount_fs     = proc_remount,
        .show_options   = proc_show_options,
};
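/*
 * proc_dir_entry lifetime: each entry carries an in_use counter.  Callers
 * about to invoke one of the entry's ->proc_fops methods pin the entry with
 * use_pde() and drop it with unuse_pde().  proc_entry_rundown() adds BIAS
 * (the most negative value) so that new use_pde() calls fail, then waits on
 * pde_unload_completion until the last in-flight user has gone away.
 */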
enum {BIAS = -1U<<31};
static inline int use_pde(struct proc_dir_entry *pde)
{
        return atomic_inc_unless_negative(&pde->in_use);
}
static void unuse_pde(struct proc_dir_entry *pde)
{
        if (atomic_dec_return(&pde->in_use) == BIAS)
                complete(pde->pde_unload_completion);
}
/* pde is locked */
static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
{
        if (pdeo->closing) {
                /* somebody else is doing that, just wait */
                DECLARE_COMPLETION_ONSTACK(c);
                pdeo->c = &c;
                spin_unlock(&pde->pde_unload_lock);
                wait_for_completion(&c);
                spin_lock(&pde->pde_unload_lock);
        } else {
                struct file *file;
                pdeo->closing = 1;
                spin_unlock(&pde->pde_unload_lock);
                file = pdeo->file;
                pde->proc_fops->release(file_inode(file), file);
                spin_lock(&pde->pde_unload_lock);
                list_del_init(&pdeo->lh);
                if (pdeo->c)
                        complete(pdeo->c);
                kfree(pdeo);
        }
}
void proc_entry_rundown(struct proc_dir_entry *de)
{
        DECLARE_COMPLETION_ONSTACK(c);
        /* Wait until all existing callers into module are done. */
        de->pde_unload_completion = &c;
        if (atomic_add_return(BIAS, &de->in_use) != BIAS)
                wait_for_completion(&c);

        spin_lock(&de->pde_unload_lock);
        while (!list_empty(&de->pde_openers)) {
                struct pde_opener *pdeo;
                pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
                close_pdeo(de, pdeo);
        }
        spin_unlock(&de->pde_unload_lock);
}
static loff_t
proc_reg_llseek(struct file *file, loff_t offset, int whence)
{
        struct proc_dir_entry *pde = PDE(file_inode(file));
        loff_t rv = -EINVAL;
        if (use_pde(pde)) {
                loff_t (*llseek)(struct file *, loff_t, int);
                llseek = pde->proc_fops->llseek;
                if (!llseek)
                        llseek = default_llseek;
                rv = llseek(file, offset, whence);
                unuse_pde(pde);
        }
        return rv;
}
static ssize_t
proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
        struct proc_dir_entry *pde = PDE(file_inode(file));
        ssize_t rv = -EIO;
        if (use_pde(pde)) {
                read = pde->proc_fops->read;
                if (read)
                        rv = read(file, buf, count, ppos);
                unuse_pde(pde);
        }
        return rv;
}
static ssize_t
proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
        ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
        struct proc_dir_entry *pde = PDE(file_inode(file));
        ssize_t rv = -EIO;
        if (use_pde(pde)) {
                write = pde->proc_fops->write;
                if (write)
                        rv = write(file, buf, count, ppos);
                unuse_pde(pde);
        }
        return rv;
}
static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
        struct proc_dir_entry *pde = PDE(file_inode(file));
        unsigned int rv = DEFAULT_POLLMASK;
        unsigned int (*poll)(struct file *, struct poll_table_struct *);
        if (use_pde(pde)) {
                poll = pde->proc_fops->poll;
                if (poll)
                        rv = poll(file, pts);
                unuse_pde(pde);
        }
        return rv;
}
static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct proc_dir_entry *pde = PDE(file_inode(file));
        long rv = -ENOTTY;
        long (*ioctl)(struct file *, unsigned int, unsigned long);
        if (use_pde(pde)) {
                ioctl = pde->proc_fops->unlocked_ioctl;
                if (ioctl)
                        rv = ioctl(file, cmd, arg);
                unuse_pde(pde);
        }
        return rv;
}
#ifdef CONFIG_COMPAT
static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct proc_dir_entry *pde = PDE(file_inode(file));
        long rv = -ENOTTY;
        long (*compat_ioctl)(struct file *, unsigned int, unsigned long);
        if (use_pde(pde)) {
                compat_ioctl = pde->proc_fops->compat_ioctl;
                if (compat_ioctl)
                        rv = compat_ioctl(file, cmd, arg);
                unuse_pde(pde);
        }
        return rv;
}
#endif
static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct proc_dir_entry *pde = PDE(file_inode(file));
        int rv = -EIO;
        int (*mmap)(struct file *, struct vm_area_struct *);
        if (use_pde(pde)) {
                mmap = pde->proc_fops->mmap;
                if (mmap)
                        rv = mmap(file, vma);
                unuse_pde(pde);
        }
        return rv;
}
static int proc_reg_open(struct inode *inode, struct file *file)
{
        struct proc_dir_entry *pde = PDE(inode);
        int rv = 0;
        int (*open)(struct inode *, struct file *);
        int (*release)(struct inode *, struct file *);
        struct pde_opener *pdeo;

        /*
         * What for, you ask? Well, we can have open, rmmod, remove_proc_entry
         * sequence. ->release won't be called because ->proc_fops will be
         * cleared. Depending on complexity of ->release, consequences vary.
         *
         * We can't wait for mercy when close will be done for real, it's
         * deadlockable: rmmod foo </proc/foo . So, we're going to do ->release
         * by hand in remove_proc_entry(). For this, save opener's credentials
         * for later.
         */
        pdeo = kzalloc(sizeof(struct pde_opener), GFP_KERNEL);
        if (!pdeo)
                return -ENOMEM;

        if (!use_pde(pde)) {
                kfree(pdeo);
                return -ENOENT;
        }
        open = pde->proc_fops->open;
        release = pde->proc_fops->release;

        if (open)
                rv = open(inode, file);

        if (rv == 0 && release) {
                /* To know what to release. */
                pdeo->file = file;
                /* Strictly for "too late" ->release in proc_reg_release(). */
                spin_lock(&pde->pde_unload_lock);
                list_add(&pdeo->lh, &pde->pde_openers);
                spin_unlock(&pde->pde_unload_lock);
        } else
                kfree(pdeo);

        unuse_pde(pde);
        return rv;
}
static int proc_reg_release(struct inode *inode, struct file *file)
{
        struct proc_dir_entry *pde = PDE(inode);
        struct pde_opener *pdeo;
        spin_lock(&pde->pde_unload_lock);
        list_for_each_entry(pdeo, &pde->pde_openers, lh) {
                if (pdeo->file == file) {
                        close_pdeo(pde, pdeo);
                        break;
                }
        }
        spin_unlock(&pde->pde_unload_lock);
        return 0;
}
static const struct file_operations proc_reg_file_ops = {
        .llseek         = proc_reg_llseek,
        .read           = proc_reg_read,
        .write          = proc_reg_write,
        .poll           = proc_reg_poll,
        .unlocked_ioctl = proc_reg_unlocked_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = proc_reg_compat_ioctl,
#endif
        .mmap           = proc_reg_mmap,
        .open           = proc_reg_open,
        .release        = proc_reg_release,
};
#ifdef CONFIG_COMPAT
static const struct file_operations proc_reg_file_ops_no_compat = {
        .llseek         = proc_reg_llseek,
        .read           = proc_reg_read,
        .write          = proc_reg_write,
        .poll           = proc_reg_poll,
        .unlocked_ioctl = proc_reg_unlocked_ioctl,
        .mmap           = proc_reg_mmap,
        .open           = proc_reg_open,
        .release        = proc_reg_release,
};
#endif
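/*
 * Illustrative sketch (not part of this file): a module that registers a
 * regular file with, e.g., proc_create("example", 0, NULL, &example_fops)
 * ("example" and example_fops being hypothetical names) gets an inode from
 * proc_get_inode() below whose i_fop points at proc_reg_file_ops (or
 * proc_reg_file_ops_no_compat), so every access to that file goes through
 * the pinning wrappers above before reaching the module's own methods.
 */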
struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
{
        struct inode *inode = new_inode_pseudo(sb);

        if (inode) {
                inode->i_ino = de->low_ino;
                inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
                PROC_I(inode)->pde = de;

                if (de->mode) {
                        inode->i_mode = de->mode;
                        inode->i_uid = de->uid;
                        inode->i_gid = de->gid;
                }
                if (de->size)
                        inode->i_size = de->size;
                if (de->nlink)
                        set_nlink(inode, de->nlink);
                WARN_ON(!de->proc_iops);
                inode->i_op = de->proc_iops;
                if (de->proc_fops) {
                        if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_COMPAT
                                if (!de->proc_fops->compat_ioctl)
                                        inode->i_fop =
                                                &proc_reg_file_ops_no_compat;
                                else
#endif
                                        inode->i_fop = &proc_reg_file_ops;
                        } else {
                                inode->i_fop = de->proc_fops;
                        }
                }
        } else
                pde_put(de);
        return inode;
}
int proc_fill_super(struct super_block *s)
{
        struct inode *root_inode;

        s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
        s->s_blocksize = 1024;
        s->s_blocksize_bits = 10;
        s->s_magic = PROC_SUPER_MAGIC;
        s->s_op = &proc_sops;
        s->s_time_gran = 1;

        pde_get(&proc_root);
        root_inode = proc_get_inode(s, &proc_root);
        if (!root_inode) {
                pr_err("proc_fill_super: get root inode failed\n");
                return -ENOMEM;
        }

        s->s_root = d_make_root(root_inode);
        if (!s->s_root) {
                pr_err("proc_fill_super: allocate dentry failed\n");
                return -ENOMEM;
        }

        return proc_setup_self(s);
}