/*
 * proc/fs/generic.c --- generic routines for the proc-fs
 *
 * This file contains generic proc-fs routines for handling
 * directories and files.
 *
 * Copyright (C) 1991, 1992 Linus Torvalds.
 * Copyright (C) 1997 Theodore Ts'o
 */
11 #include <linux/errno.h>
12 #include <linux/time.h>
13 #include <linux/proc_fs.h>
14 #include <linux/stat.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/printk.h>
19 #include <linux/mount.h>
20 #include <linux/init.h>
21 #include <linux/idr.h>
22 #include <linux/namei.h>
23 #include <linux/bitops.h>
24 #include <linux/spinlock.h>
25 #include <linux/completion.h>
26 #include <asm/uaccess.h>
/* Serialises walks and edits of the ->subdir/->next sibling lists. */
DEFINE_SPINLOCK(proc_subdir_lock);
32 static int proc_match(unsigned int len
, const char *name
, struct proc_dir_entry
*de
)
34 if (de
->namelen
!= len
)
36 return !memcmp(name
, de
->name
, len
);
/* buffer size is one page but our output routines use some slack for overruns */
#define PROC_BLOCK_SIZE	(PAGE_SIZE - 1024)
43 __proc_file_read(struct file
*file
, char __user
*buf
, size_t nbytes
,
46 struct inode
* inode
= file_inode(file
);
52 struct proc_dir_entry
* dp
;
53 unsigned long long pos
;
56 * Gaah, please just use "seq_file" instead. The legacy /proc
57 * interfaces cut loff_t down to off_t for reads, and ignore
58 * the offset entirely for writes..
61 if (pos
> MAX_NON_LFS
)
63 if (nbytes
> MAX_NON_LFS
- pos
)
64 nbytes
= MAX_NON_LFS
- pos
;
67 if (!(page
= (char*) __get_free_page(GFP_TEMPORARY
)))
70 while ((nbytes
> 0) && !eof
) {
71 count
= min_t(size_t, PROC_BLOCK_SIZE
, nbytes
);
76 * How to be a proc read function
77 * ------------------------------
79 * int f(char *buffer, char **start, off_t offset,
80 * int count, int *peof, void *dat)
82 * Assume that the buffer is "count" bytes in size.
84 * If you know you have supplied all the data you
87 * You have three ways to return data:
88 * 0) Leave *start = NULL. (This is the default.)
89 * Put the data of the requested offset at that
90 * offset within the buffer. Return the number (n)
91 * of bytes there are from the beginning of the
92 * buffer up to the last byte of data. If the
93 * number of supplied bytes (= n - offset) is
94 * greater than zero and you didn't signal eof
95 * and the reader is prepared to take more data
96 * you will be called again with the requested
97 * offset advanced by the number of bytes
98 * absorbed. This interface is useful for files
99 * no larger than the buffer.
100 * 1) Set *start = an unsigned long value less than
101 * the buffer address but greater than zero.
102 * Put the data of the requested offset at the
103 * beginning of the buffer. Return the number of
104 * bytes of data placed there. If this number is
105 * greater than zero and you didn't signal eof
106 * and the reader is prepared to take more data
107 * you will be called again with the requested
108 * offset advanced by *start. This interface is
109 * useful when you have a large file consisting
110 * of a series of blocks which you want to count
111 * and return as wholes.
112 * (Hack by Paul.Russell@rustcorp.com.au)
113 * 2) Set *start = an address within the buffer.
114 * Put the data of the requested offset at *start.
115 * Return the number of bytes of data placed there.
116 * If this number is greater than zero and you
117 * didn't signal eof and the reader is prepared to
118 * take more data you will be called again with the
119 * requested offset advanced by the number of bytes
122 n
= dp
->read_proc(page
, &start
, *ppos
,
123 count
, &eof
, dp
->data
);
127 if (n
== 0) /* end of file */
129 if (n
< 0) { /* error */
136 if (n
> PAGE_SIZE
) /* Apparent buffer overflow */
143 start
= page
+ *ppos
;
144 } else if (start
< page
) {
145 if (n
> PAGE_SIZE
) /* Apparent buffer overflow */
149 * Don't reduce n because doing so might
150 * cut off part of a data block.
152 pr_warn("proc_file_read: count exceeded\n");
154 } else /* start >= page */ {
155 unsigned long startoff
= (unsigned long)(start
- page
);
156 if (n
> (PAGE_SIZE
- startoff
)) /* buffer overflow? */
157 n
= PAGE_SIZE
- startoff
;
162 n
-= copy_to_user(buf
, start
< page
? page
: start
, n
);
169 *ppos
+= start
< page
? (unsigned long)start
: n
;
174 free_page((unsigned long) page
);
179 proc_file_read(struct file
*file
, char __user
*buf
, size_t nbytes
,
182 struct proc_dir_entry
*pde
= PDE(file_inode(file
));
185 spin_lock(&pde
->pde_unload_lock
);
186 if (!pde
->proc_fops
) {
187 spin_unlock(&pde
->pde_unload_lock
);
191 spin_unlock(&pde
->pde_unload_lock
);
193 rv
= __proc_file_read(file
, buf
, nbytes
, ppos
);
200 proc_file_write(struct file
*file
, const char __user
*buffer
,
201 size_t count
, loff_t
*ppos
)
203 struct proc_dir_entry
*pde
= PDE(file_inode(file
));
206 if (pde
->write_proc
) {
207 spin_lock(&pde
->pde_unload_lock
);
208 if (!pde
->proc_fops
) {
209 spin_unlock(&pde
->pde_unload_lock
);
213 spin_unlock(&pde
->pde_unload_lock
);
215 /* FIXME: does this routine need ppos? probably... */
216 rv
= pde
->write_proc(file
, buffer
, count
, pde
->data
);
224 proc_file_lseek(struct file
*file
, loff_t offset
, int orig
)
226 loff_t retval
= -EINVAL
;
229 offset
+= file
->f_pos
;
232 if (offset
< 0 || offset
> MAX_NON_LFS
)
234 file
->f_pos
= retval
= offset
;
239 static const struct file_operations proc_file_operations
= {
240 .llseek
= proc_file_lseek
,
241 .read
= proc_file_read
,
242 .write
= proc_file_write
,
245 static int proc_notify_change(struct dentry
*dentry
, struct iattr
*iattr
)
247 struct inode
*inode
= dentry
->d_inode
;
248 struct proc_dir_entry
*de
= PDE(inode
);
251 error
= inode_change_ok(inode
, iattr
);
255 setattr_copy(inode
, iattr
);
256 mark_inode_dirty(inode
);
258 de
->uid
= inode
->i_uid
;
259 de
->gid
= inode
->i_gid
;
260 de
->mode
= inode
->i_mode
;
264 static int proc_getattr(struct vfsmount
*mnt
, struct dentry
*dentry
,
267 struct inode
*inode
= dentry
->d_inode
;
268 struct proc_dir_entry
*de
= PROC_I(inode
)->pde
;
270 set_nlink(inode
, de
->nlink
);
272 generic_fillattr(inode
, stat
);
276 static const struct inode_operations proc_file_inode_operations
= {
277 .setattr
= proc_notify_change
,
281 * This function parses a name such as "tty/driver/serial", and
282 * returns the struct proc_dir_entry for "/proc/tty/driver", and
283 * returns "serial" in residual.
285 static int __xlate_proc_name(const char *name
, struct proc_dir_entry
**ret
,
286 const char **residual
)
288 const char *cp
= name
, *next
;
289 struct proc_dir_entry
*de
;
297 next
= strchr(cp
, '/');
302 for (de
= de
->subdir
; de
; de
= de
->next
) {
303 if (proc_match(len
, cp
, de
))
307 WARN(1, "name '%s'\n", name
);
317 static int xlate_proc_name(const char *name
, struct proc_dir_entry
**ret
,
318 const char **residual
)
322 spin_lock(&proc_subdir_lock
);
323 rv
= __xlate_proc_name(name
, ret
, residual
);
324 spin_unlock(&proc_subdir_lock
);
static DEFINE_IDA(proc_inum_ida);
static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */

#define PROC_DYNAMIC_FIRST 0xF0000000U
334 * Return an inode number between PROC_DYNAMIC_FIRST and
335 * 0xffffffff, or zero on failure.
337 int proc_alloc_inum(unsigned int *inum
)
343 if (!ida_pre_get(&proc_inum_ida
, GFP_KERNEL
))
346 spin_lock_irq(&proc_inum_lock
);
347 error
= ida_get_new(&proc_inum_ida
, &i
);
348 spin_unlock_irq(&proc_inum_lock
);
349 if (error
== -EAGAIN
)
354 if (i
> UINT_MAX
- PROC_DYNAMIC_FIRST
) {
355 spin_lock_irq(&proc_inum_lock
);
356 ida_remove(&proc_inum_ida
, i
);
357 spin_unlock_irq(&proc_inum_lock
);
360 *inum
= PROC_DYNAMIC_FIRST
+ i
;
364 void proc_free_inum(unsigned int inum
)
367 spin_lock_irqsave(&proc_inum_lock
, flags
);
368 ida_remove(&proc_inum_ida
, inum
- PROC_DYNAMIC_FIRST
);
369 spin_unlock_irqrestore(&proc_inum_lock
, flags
);
372 static void *proc_follow_link(struct dentry
*dentry
, struct nameidata
*nd
)
374 nd_set_link(nd
, PDE(dentry
->d_inode
)->data
);
378 static const struct inode_operations proc_link_inode_operations
= {
379 .readlink
= generic_readlink
,
380 .follow_link
= proc_follow_link
,
/*
 * As some entries in /proc are volatile, we want to
 * get rid of unused dentries. This could be made
 * smarter: we could keep a "volatile" flag in the
 * inode to indicate which ones to keep.
 */
static int proc_delete_dentry(const struct dentry *dentry)
{
	return 1;
}
394 static const struct dentry_operations proc_dentry_operations
=
396 .d_delete
= proc_delete_dentry
,
400 * Don't create negative dentries here, return -ENOENT by hand
403 struct dentry
*proc_lookup_de(struct proc_dir_entry
*de
, struct inode
*dir
,
404 struct dentry
*dentry
)
408 spin_lock(&proc_subdir_lock
);
409 for (de
= de
->subdir
; de
; de
= de
->next
) {
410 if (de
->namelen
!= dentry
->d_name
.len
)
412 if (!memcmp(dentry
->d_name
.name
, de
->name
, de
->namelen
)) {
414 spin_unlock(&proc_subdir_lock
);
415 inode
= proc_get_inode(dir
->i_sb
, de
);
417 return ERR_PTR(-ENOMEM
);
418 d_set_d_op(dentry
, &proc_dentry_operations
);
419 d_add(dentry
, inode
);
423 spin_unlock(&proc_subdir_lock
);
424 return ERR_PTR(-ENOENT
);
/* ->lookup for proc directories: delegate to the entry-tree walker. */
struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
		unsigned int flags)
{
	return proc_lookup_de(PDE(dir), dir, dentry);
}
434 * This returns non-zero if at EOF, so that the /proc
435 * root directory can use this and check if it should
436 * continue with the <pid> entries..
438 * Note that the VFS-layer doesn't care about the return
439 * value of the readdir() call, as long as it's non-negative
442 int proc_readdir_de(struct proc_dir_entry
*de
, struct file
*filp
, void *dirent
,
447 struct inode
*inode
= file_inode(filp
);
454 if (filldir(dirent
, ".", 1, i
, ino
, DT_DIR
) < 0)
460 if (filldir(dirent
, "..", 2, i
,
461 parent_ino(filp
->f_path
.dentry
),
468 spin_lock(&proc_subdir_lock
);
474 spin_unlock(&proc_subdir_lock
);
484 struct proc_dir_entry
*next
;
486 /* filldir passes info to user space */
488 spin_unlock(&proc_subdir_lock
);
489 if (filldir(dirent
, de
->name
, de
->namelen
, filp
->f_pos
,
490 de
->low_ino
, de
->mode
>> 12) < 0) {
494 spin_lock(&proc_subdir_lock
);
500 spin_unlock(&proc_subdir_lock
);
507 int proc_readdir(struct file
*filp
, void *dirent
, filldir_t filldir
)
509 struct inode
*inode
= file_inode(filp
);
511 return proc_readdir_de(PDE(inode
), filp
, dirent
, filldir
);
515 * These are the generic /proc directory operations. They
516 * use the in-memory "struct proc_dir_entry" tree to parse
517 * the /proc directory.
519 static const struct file_operations proc_dir_operations
= {
520 .llseek
= generic_file_llseek
,
521 .read
= generic_read_dir
,
522 .readdir
= proc_readdir
,
526 * proc directories can do almost nothing..
528 static const struct inode_operations proc_dir_inode_operations
= {
529 .lookup
= proc_lookup
,
530 .getattr
= proc_getattr
,
531 .setattr
= proc_notify_change
,
534 static int proc_register(struct proc_dir_entry
* dir
, struct proc_dir_entry
* dp
)
536 struct proc_dir_entry
*tmp
;
539 ret
= proc_alloc_inum(&dp
->low_ino
);
543 if (S_ISDIR(dp
->mode
)) {
544 if (dp
->proc_iops
== NULL
) {
545 dp
->proc_fops
= &proc_dir_operations
;
546 dp
->proc_iops
= &proc_dir_inode_operations
;
549 } else if (S_ISLNK(dp
->mode
)) {
550 if (dp
->proc_iops
== NULL
)
551 dp
->proc_iops
= &proc_link_inode_operations
;
552 } else if (S_ISREG(dp
->mode
)) {
553 if (dp
->proc_fops
== NULL
)
554 dp
->proc_fops
= &proc_file_operations
;
555 if (dp
->proc_iops
== NULL
)
556 dp
->proc_iops
= &proc_file_inode_operations
;
559 spin_lock(&proc_subdir_lock
);
561 for (tmp
= dir
->subdir
; tmp
; tmp
= tmp
->next
)
562 if (strcmp(tmp
->name
, dp
->name
) == 0) {
563 WARN(1, "proc_dir_entry '%s/%s' already registered\n",
564 dir
->name
, dp
->name
);
568 dp
->next
= dir
->subdir
;
571 spin_unlock(&proc_subdir_lock
);
576 static struct proc_dir_entry
*__proc_create(struct proc_dir_entry
**parent
,
581 struct proc_dir_entry
*ent
= NULL
;
582 const char *fn
= name
;
585 /* make sure name is valid */
586 if (!name
|| !strlen(name
))
589 if (xlate_proc_name(name
, parent
, &fn
) != 0)
592 /* At this point there must not be any '/' characters beyond *fn */
598 ent
= kzalloc(sizeof(struct proc_dir_entry
) + len
+ 1, GFP_KERNEL
);
602 memcpy(ent
->name
, fn
, len
+ 1);
606 atomic_set(&ent
->count
, 1);
607 spin_lock_init(&ent
->pde_unload_lock
);
608 INIT_LIST_HEAD(&ent
->pde_openers
);
613 struct proc_dir_entry
*proc_symlink(const char *name
,
614 struct proc_dir_entry
*parent
, const char *dest
)
616 struct proc_dir_entry
*ent
;
618 ent
= __proc_create(&parent
, name
,
619 (S_IFLNK
| S_IRUGO
| S_IWUGO
| S_IXUGO
),1);
622 ent
->data
= kmalloc((ent
->size
=strlen(dest
))+1, GFP_KERNEL
);
624 strcpy((char*)ent
->data
,dest
);
625 if (proc_register(parent
, ent
) < 0) {
637 EXPORT_SYMBOL(proc_symlink
);
639 struct proc_dir_entry
*proc_mkdir_mode(const char *name
, umode_t mode
,
640 struct proc_dir_entry
*parent
)
642 struct proc_dir_entry
*ent
;
644 ent
= __proc_create(&parent
, name
, S_IFDIR
| mode
, 2);
646 if (proc_register(parent
, ent
) < 0) {
653 EXPORT_SYMBOL(proc_mkdir_mode
);
655 struct proc_dir_entry
*proc_net_mkdir(struct net
*net
, const char *name
,
656 struct proc_dir_entry
*parent
)
658 struct proc_dir_entry
*ent
;
660 ent
= __proc_create(&parent
, name
, S_IFDIR
| S_IRUGO
| S_IXUGO
, 2);
663 if (proc_register(parent
, ent
) < 0) {
670 EXPORT_SYMBOL_GPL(proc_net_mkdir
);
672 struct proc_dir_entry
*proc_mkdir(const char *name
,
673 struct proc_dir_entry
*parent
)
675 return proc_mkdir_mode(name
, S_IRUGO
| S_IXUGO
, parent
);
677 EXPORT_SYMBOL(proc_mkdir
);
679 struct proc_dir_entry
*create_proc_entry(const char *name
, umode_t mode
,
680 struct proc_dir_entry
*parent
)
682 struct proc_dir_entry
*ent
;
686 if ((mode
& S_IALLUGO
) == 0)
687 mode
|= S_IRUGO
| S_IXUGO
;
690 if ((mode
& S_IFMT
) == 0)
692 if ((mode
& S_IALLUGO
) == 0)
697 ent
= __proc_create(&parent
, name
, mode
, nlink
);
699 if (proc_register(parent
, ent
) < 0) {
706 EXPORT_SYMBOL(create_proc_entry
);
708 struct proc_dir_entry
*proc_create_data(const char *name
, umode_t mode
,
709 struct proc_dir_entry
*parent
,
710 const struct file_operations
*proc_fops
,
713 struct proc_dir_entry
*pde
;
717 if ((mode
& S_IALLUGO
) == 0)
718 mode
|= S_IRUGO
| S_IXUGO
;
721 if ((mode
& S_IFMT
) == 0)
723 if ((mode
& S_IALLUGO
) == 0)
728 pde
= __proc_create(&parent
, name
, mode
, nlink
);
731 pde
->proc_fops
= proc_fops
;
733 if (proc_register(parent
, pde
) < 0)
741 EXPORT_SYMBOL(proc_create_data
);
743 static void free_proc_entry(struct proc_dir_entry
*de
)
745 proc_free_inum(de
->low_ino
);
747 if (S_ISLNK(de
->mode
))
752 void pde_put(struct proc_dir_entry
*pde
)
754 if (atomic_dec_and_test(&pde
->count
))
755 free_proc_entry(pde
);
758 static void entry_rundown(struct proc_dir_entry
*de
)
760 spin_lock(&de
->pde_unload_lock
);
762 * Stop accepting new callers into module. If you're
763 * dynamically allocating ->proc_fops, save a pointer somewhere.
765 de
->proc_fops
= NULL
;
766 /* Wait until all existing callers into module are done. */
767 if (de
->pde_users
> 0) {
768 DECLARE_COMPLETION_ONSTACK(c
);
770 if (!de
->pde_unload_completion
)
771 de
->pde_unload_completion
= &c
;
773 spin_unlock(&de
->pde_unload_lock
);
775 wait_for_completion(de
->pde_unload_completion
);
777 spin_lock(&de
->pde_unload_lock
);
780 while (!list_empty(&de
->pde_openers
)) {
781 struct pde_opener
*pdeo
;
783 pdeo
= list_first_entry(&de
->pde_openers
, struct pde_opener
, lh
);
785 spin_unlock(&de
->pde_unload_lock
);
786 pdeo
->release(pdeo
->inode
, pdeo
->file
);
788 spin_lock(&de
->pde_unload_lock
);
790 spin_unlock(&de
->pde_unload_lock
);
794 * Remove a /proc entry and free it if it's not currently in use.
796 void remove_proc_entry(const char *name
, struct proc_dir_entry
*parent
)
798 struct proc_dir_entry
**p
;
799 struct proc_dir_entry
*de
= NULL
;
800 const char *fn
= name
;
803 spin_lock(&proc_subdir_lock
);
804 if (__xlate_proc_name(name
, &parent
, &fn
) != 0) {
805 spin_unlock(&proc_subdir_lock
);
810 for (p
= &parent
->subdir
; *p
; p
=&(*p
)->next
) {
811 if (proc_match(len
, fn
, *p
)) {
818 spin_unlock(&proc_subdir_lock
);
820 WARN(1, "name '%s'\n", name
);
826 if (S_ISDIR(de
->mode
))
829 WARN(de
->subdir
, "%s: removing non-empty directory "
830 "'%s/%s', leaking at least '%s'\n", __func__
,
831 de
->parent
->name
, de
->name
, de
->subdir
->name
);
834 EXPORT_SYMBOL(remove_proc_entry
);
836 int remove_proc_subtree(const char *name
, struct proc_dir_entry
*parent
)
838 struct proc_dir_entry
**p
;
839 struct proc_dir_entry
*root
= NULL
, *de
, *next
;
840 const char *fn
= name
;
843 spin_lock(&proc_subdir_lock
);
844 if (__xlate_proc_name(name
, &parent
, &fn
) != 0) {
845 spin_unlock(&proc_subdir_lock
);
850 for (p
= &parent
->subdir
; *p
; p
=&(*p
)->next
) {
851 if (proc_match(len
, fn
, *p
)) {
859 spin_unlock(&proc_subdir_lock
);
866 de
->subdir
= next
->next
;
871 spin_unlock(&proc_subdir_lock
);
875 if (S_ISDIR(de
->mode
))
882 spin_lock(&proc_subdir_lock
);
888 EXPORT_SYMBOL(remove_proc_subtree
);