1 /*
2 * linux/fs/proc/base.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * proc base directory handling functions
8 * 1999, Al Viro. Rewritten. Now it covers the whole per-process part.
9 * Instead of using magical inumbers to determine the kind of object
10 * we allocate and fill in-core inodes upon lookup. They don't even
11 * go into icache. We cache the reference to task_struct upon lookup too.
12 * Eventually it should become a filesystem in its own right. We don't use the
13 * rest of procfs anymore.
16 * Changelog:
17 * 17-Jan-2005
18 * Allan Bezerra
19 * Bruna Moreira <bruna.moreira@indt.org.br>
20 * Edjard Mota <edjard.mota@indt.org.br>
21 * Ilias Biris <ilias.biris@indt.org.br>
22 * Mauricio Lin <mauricio.lin@indt.org.br>
24 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
26 * A new process specific entry (smaps) included in /proc. It shows the
27 * size of rss for each memory area. The maps entry lacks information
28 * about physical memory size (rss) for each mapped file, i.e.,
29 * rss information for executables and library files.
30 * This additional information is useful for any tools that need to know
31 * about physical memory consumption for a process specific library.
33 * Changelog:
34 * 21-Feb-2005
35 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
36 * Pud inclusion in the page table walking.
38 * ChangeLog:
39 * 10-Mar-2005
40 * 10LE Instituto Nokia de Tecnologia - INdT:
41 * A better way to walk through the page table, as suggested by Hugh Dickins.
43 * Simo Piiroinen <simo.piiroinen@nokia.com>:
44 * Smaps information related to shared, private, clean and dirty pages.
46 * Paul Mundt <paul.mundt@nokia.com>:
47 * Overall revision about smaps.
50 #include <asm/uaccess.h>
52 #include <linux/errno.h>
53 #include <linux/time.h>
54 #include <linux/proc_fs.h>
55 #include <linux/stat.h>
56 #include <linux/init.h>
57 #include <linux/capability.h>
58 #include <linux/file.h>
59 #include <linux/string.h>
60 #include <linux/seq_file.h>
61 #include <linux/namei.h>
62 #include <linux/namespace.h>
63 #include <linux/mm.h>
64 #include <linux/smp_lock.h>
65 #include <linux/rcupdate.h>
66 #include <linux/kallsyms.h>
67 #include <linux/mount.h>
68 #include <linux/security.h>
69 #include <linux/ptrace.h>
70 #include <linux/seccomp.h>
71 #include <linux/cpuset.h>
72 #include <linux/audit.h>
73 #include <linux/poll.h>
74 #include <linux/nsproxy.h>
75 #include "internal.h"
77 /* NOTE:
78 * Implementing inode permission operations in /proc is almost
79 * certainly an error. Permission checks need to happen during
80 * each system call, not at open time. The reason is that most of
81 * what we wish to check for permissions in /proc varies at runtime.
83 * The classic example of a problem is opening file descriptors
84 * in /proc for a task before it execs a suid executable.
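 *
 * An illustrative sketch of that race (hypothetical pid and fd, not code
 * from this file):
 *
 *	fd = open("/proc/1234/mem", O_RDONLY);	<- 1234 is unprivileged here
 *	(task 1234 now execs a setuid-root binary)
 *	read(fd, buf, sizeof(buf));		<- must be rejected here, at
 *						   read time, not only at open
 */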
88 /* Worst case buffer size needed for holding an integer: sign + 10 decimal digits + '\n' + '\0' for a 32-bit value. */
89 #define PROC_NUMBUF 13
91 struct pid_entry {
92 int len;
93 char *name;
94 mode_t mode;
95 struct inode_operations *iop;
96 struct file_operations *fop;
97 union proc_op op;
100 #define NOD(NAME, MODE, IOP, FOP, OP) { \
101 .len = sizeof(NAME) - 1, \
102 .name = (NAME), \
103 .mode = MODE, \
104 .iop = IOP, \
105 .fop = FOP, \
106 .op = OP, \
109 #define DIR(NAME, MODE, OTYPE) \
110 NOD(NAME, (S_IFDIR|(MODE)), \
111 &proc_##OTYPE##_inode_operations, &proc_##OTYPE##_operations, \
112 {} )
113 #define LNK(NAME, OTYPE) \
114 NOD(NAME, (S_IFLNK|S_IRWXUGO), \
115 &proc_pid_link_inode_operations, NULL, \
116 { .proc_get_link = &proc_##OTYPE##_link } )
117 #define REG(NAME, MODE, OTYPE) \
118 NOD(NAME, (S_IFREG|(MODE)), NULL, \
119 &proc_##OTYPE##_operations, {})
120 #define INF(NAME, MODE, OTYPE) \
121 NOD(NAME, (S_IFREG|(MODE)), \
122 NULL, &proc_info_file_operations, \
123 { .proc_read = &proc_##OTYPE } )
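/*
 * For illustration, the "wchan" entry in tgid_base_stuff below,
 *	INF("wchan", S_IRUGO, pid_wchan),
 * expands through NOD() to roughly:
 *
 *	{
 *		.len  = sizeof("wchan") - 1,
 *		.name = "wchan",
 *		.mode = (S_IFREG|(S_IRUGO)),
 *		.iop  = NULL,
 *		.fop  = &proc_info_file_operations,
 *		.op   = { .proc_read = &proc_pid_wchan },
 *	}
 *
 * i.e. a regular one-value file whose reads end up in proc_pid_wchan()
 * via proc_info_read().
 */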
125 static struct fs_struct *get_fs_struct(struct task_struct *task)
127 struct fs_struct *fs;
128 task_lock(task);
129 fs = task->fs;
130 if(fs)
131 atomic_inc(&fs->count);
132 task_unlock(task);
133 return fs;
136 static int get_nr_threads(struct task_struct *tsk)
138 /* Must be called with the rcu_read_lock held */
139 unsigned long flags;
140 int count = 0;
142 if (lock_task_sighand(tsk, &flags)) {
143 count = atomic_read(&tsk->signal->count);
144 unlock_task_sighand(tsk, &flags);
146 return count;
149 static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
151 struct task_struct *task = get_proc_task(inode);
152 struct fs_struct *fs = NULL;
153 int result = -ENOENT;
155 if (task) {
156 fs = get_fs_struct(task);
157 put_task_struct(task);
159 if (fs) {
160 read_lock(&fs->lock);
161 *mnt = mntget(fs->pwdmnt);
162 *dentry = dget(fs->pwd);
163 read_unlock(&fs->lock);
164 result = 0;
165 put_fs_struct(fs);
167 return result;
170 static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
172 struct task_struct *task = get_proc_task(inode);
173 struct fs_struct *fs = NULL;
174 int result = -ENOENT;
176 if (task) {
177 fs = get_fs_struct(task);
178 put_task_struct(task);
180 if (fs) {
181 read_lock(&fs->lock);
182 *mnt = mntget(fs->rootmnt);
183 *dentry = dget(fs->root);
184 read_unlock(&fs->lock);
185 result = 0;
186 put_fs_struct(fs);
188 return result;
191 #define MAY_PTRACE(task) \
192 (task == current || \
193 (task->parent == current && \
194 (task->ptrace & PT_PTRACED) && \
195 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
196 security_ptrace(current,task) == 0))
198 static int proc_pid_environ(struct task_struct *task, char * buffer)
200 int res = 0;
201 struct mm_struct *mm = get_task_mm(task);
202 if (mm) {
203 unsigned int len = mm->env_end - mm->env_start;
204 if (len > PAGE_SIZE)
205 len = PAGE_SIZE;
206 res = access_process_vm(task, mm->env_start, buffer, len, 0);
207 if (!ptrace_may_attach(task))
208 res = -ESRCH;
209 mmput(mm);
211 return res;
214 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
216 int res = 0;
217 unsigned int len;
218 struct mm_struct *mm = get_task_mm(task);
219 if (!mm)
220 goto out;
221 if (!mm->arg_end)
222 goto out_mm; /* Shh! No looking before we're done */
224 len = mm->arg_end - mm->arg_start;
226 if (len > PAGE_SIZE)
227 len = PAGE_SIZE;
229 res = access_process_vm(task, mm->arg_start, buffer, len, 0);
231 // If the nul at the end of args has been overwritten, then
232 // assume application is using setproctitle(3).
233 if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
234 len = strnlen(buffer, res);
235 if (len < res) {
236 res = len;
237 } else {
238 len = mm->env_end - mm->env_start;
239 if (len > PAGE_SIZE - res)
240 len = PAGE_SIZE - res;
241 res += access_process_vm(task, mm->env_start, buffer+res, len, 0);
242 res = strnlen(buffer, res);
245 out_mm:
246 mmput(mm);
247 out:
248 return res;
251 static int proc_pid_auxv(struct task_struct *task, char *buffer)
253 int res = 0;
254 struct mm_struct *mm = get_task_mm(task);
255 if (mm) {
256 unsigned int nwords = 0;
257 do {
258 nwords += 2;
259 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
260 res = nwords * sizeof(mm->saved_auxv[0]);
261 if (res > PAGE_SIZE)
262 res = PAGE_SIZE;
263 memcpy(buffer, mm->saved_auxv, res);
264 mmput(mm);
266 return res;
270 #ifdef CONFIG_KALLSYMS
272 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
273 * Returns the resolved symbol. If that fails, simply return the address.
275 static int proc_pid_wchan(struct task_struct *task, char *buffer)
277 char *modname;
278 const char *sym_name;
279 unsigned long wchan, size, offset;
280 char namebuf[KSYM_NAME_LEN+1];
282 wchan = get_wchan(task);
284 sym_name = kallsyms_lookup(wchan, &size, &offset, &modname, namebuf);
285 if (sym_name)
286 return sprintf(buffer, "%s", sym_name);
287 return sprintf(buffer, "%lu", wchan);
289 #endif /* CONFIG_KALLSYMS */
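/*
 * Illustrative output only (symbol names vary by workload and config):
 *
 *	$ cat /proc/1/wchan
 *	select
 *
 * or a raw decimal address if the kallsyms lookup fails; note there is
 * no trailing newline.
 */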
291 #ifdef CONFIG_SCHEDSTATS
293 * Provides /proc/PID/schedstat
295 static int proc_pid_schedstat(struct task_struct *task, char *buffer)
297 return sprintf(buffer, "%lu %lu %lu\n",
298 task->sched_info.cpu_time,
299 task->sched_info.run_delay,
300 task->sched_info.pcnt);
302 #endif
304 /* The badness from the OOM killer */
305 unsigned long badness(struct task_struct *p, unsigned long uptime);
306 static int proc_oom_score(struct task_struct *task, char *buffer)
308 unsigned long points;
309 struct timespec uptime;
311 do_posix_clock_monotonic_gettime(&uptime);
312 points = badness(task, uptime.tv_sec);
313 return sprintf(buffer, "%lu\n", points);
316 /************************************************************************/
317 /* Here the fs part begins */
318 /************************************************************************/
320 /* permission checks */
321 static int proc_fd_access_allowed(struct inode *inode)
323 struct task_struct *task;
324 int allowed = 0;
325 /* Allow access to a task's file descriptors if it is us, or if we
326 * may use ptrace attach to the process and find out that
327 * information.
329 task = get_proc_task(inode);
330 if (task) {
331 allowed = ptrace_may_attach(task);
332 put_task_struct(task);
334 return allowed;
337 static int proc_setattr(struct dentry *dentry, struct iattr *attr)
339 int error;
340 struct inode *inode = dentry->d_inode;
342 if (attr->ia_valid & ATTR_MODE)
343 return -EPERM;
345 error = inode_change_ok(inode, attr);
346 if (!error) {
347 error = security_inode_setattr(dentry, attr);
348 if (!error)
349 error = inode_setattr(inode, attr);
351 return error;
354 static struct inode_operations proc_def_inode_operations = {
355 .setattr = proc_setattr,
358 extern struct seq_operations mounts_op;
359 struct proc_mounts {
360 struct seq_file m;
361 int event;
364 static int mounts_open(struct inode *inode, struct file *file)
366 struct task_struct *task = get_proc_task(inode);
367 struct namespace *namespace = NULL;
368 struct proc_mounts *p;
369 int ret = -EINVAL;
371 if (task) {
372 task_lock(task);
373 namespace = task->nsproxy->namespace;
374 if (namespace)
375 get_namespace(namespace);
376 task_unlock(task);
377 put_task_struct(task);
380 if (namespace) {
381 ret = -ENOMEM;
382 p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
383 if (p) {
384 file->private_data = &p->m;
385 ret = seq_open(file, &mounts_op);
386 if (!ret) {
387 p->m.private = namespace;
388 p->event = namespace->event;
389 return 0;
391 kfree(p);
393 put_namespace(namespace);
395 return ret;
398 static int mounts_release(struct inode *inode, struct file *file)
400 struct seq_file *m = file->private_data;
401 struct namespace *namespace = m->private;
402 put_namespace(namespace);
403 return seq_release(inode, file);
406 static unsigned mounts_poll(struct file *file, poll_table *wait)
408 struct proc_mounts *p = file->private_data;
409 struct namespace *ns = p->m.private;
410 unsigned res = 0;
412 poll_wait(file, &ns->poll, wait);
414 spin_lock(&vfsmount_lock);
415 if (p->event != ns->event) {
416 p->event = ns->event;
417 res = POLLERR;
419 spin_unlock(&vfsmount_lock);
421 return res;
424 static struct file_operations proc_mounts_operations = {
425 .open = mounts_open,
426 .read = seq_read,
427 .llseek = seq_lseek,
428 .release = mounts_release,
429 .poll = mounts_poll,
432 extern struct seq_operations mountstats_op;
433 static int mountstats_open(struct inode *inode, struct file *file)
435 int ret = seq_open(file, &mountstats_op);
437 if (!ret) {
438 struct seq_file *m = file->private_data;
439 struct namespace *namespace = NULL;
440 struct task_struct *task = get_proc_task(inode);
442 if (task) {
443 task_lock(task);
444 namespace = task->nsproxy->namespace;
445 if (namespace)
446 get_namespace(namespace);
447 task_unlock(task);
448 put_task_struct(task);
451 if (namespace)
452 m->private = namespace;
453 else {
454 seq_release(inode, file);
455 ret = -EINVAL;
458 return ret;
461 static struct file_operations proc_mountstats_operations = {
462 .open = mountstats_open,
463 .read = seq_read,
464 .llseek = seq_lseek,
465 .release = mounts_release,
468 #define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */
470 static ssize_t proc_info_read(struct file * file, char __user * buf,
471 size_t count, loff_t *ppos)
473 struct inode * inode = file->f_dentry->d_inode;
474 unsigned long page;
475 ssize_t length;
476 struct task_struct *task = get_proc_task(inode);
478 length = -ESRCH;
479 if (!task)
480 goto out_no_task;
482 if (count > PROC_BLOCK_SIZE)
483 count = PROC_BLOCK_SIZE;
485 length = -ENOMEM;
486 if (!(page = __get_free_page(GFP_KERNEL)))
487 goto out;
489 length = PROC_I(inode)->op.proc_read(task, (char*)page);
491 if (length >= 0)
492 length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
493 free_page(page);
494 out:
495 put_task_struct(task);
496 out_no_task:
497 return length;
500 static struct file_operations proc_info_file_operations = {
501 .read = proc_info_read,
504 static int mem_open(struct inode* inode, struct file* file)
506 file->private_data = (void*)((long)current->self_exec_id);
507 return 0;
510 static ssize_t mem_read(struct file * file, char __user * buf,
511 size_t count, loff_t *ppos)
513 struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
514 char *page;
515 unsigned long src = *ppos;
516 int ret = -ESRCH;
517 struct mm_struct *mm;
519 if (!task)
520 goto out_no_task;
522 if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
523 goto out;
525 ret = -ENOMEM;
526 page = (char *)__get_free_page(GFP_USER);
527 if (!page)
528 goto out;
530 ret = 0;
532 mm = get_task_mm(task);
533 if (!mm)
534 goto out_free;
536 ret = -EIO;
538 if (file->private_data != (void*)((long)current->self_exec_id))
539 goto out_put;
541 ret = 0;
543 while (count > 0) {
544 int this_len, retval;
546 this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
547 retval = access_process_vm(task, src, page, this_len, 0);
548 if (!retval || !MAY_PTRACE(task) || !ptrace_may_attach(task)) {
549 if (!ret)
550 ret = -EIO;
551 break;
554 if (copy_to_user(buf, page, retval)) {
555 ret = -EFAULT;
556 break;
559 ret += retval;
560 src += retval;
561 buf += retval;
562 count -= retval;
564 *ppos = src;
566 out_put:
567 mmput(mm);
568 out_free:
569 free_page((unsigned long) page);
570 out:
571 put_task_struct(task);
572 out_no_task:
573 return ret;
576 #define mem_write NULL
578 #ifndef mem_write
579 /* This is a security hazard */
580 static ssize_t mem_write(struct file * file, const char * buf,
581 size_t count, loff_t *ppos)
583 int copied;
584 char *page;
585 struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
586 unsigned long dst = *ppos;
588 copied = -ESRCH;
589 if (!task)
590 goto out_no_task;
592 if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
593 goto out;
595 copied = -ENOMEM;
596 page = (char *)__get_free_page(GFP_USER);
597 if (!page)
598 goto out;
600 copied = 0;
601 while (count > 0) {
602 int this_len, retval;
604 this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
605 if (copy_from_user(page, buf, this_len)) {
606 copied = -EFAULT;
607 break;
609 retval = access_process_vm(task, dst, page, this_len, 1);
610 if (!retval) {
611 if (!copied)
612 copied = -EIO;
613 break;
615 copied += retval;
616 buf += retval;
617 dst += retval;
618 count -= retval;
620 *ppos = dst;
621 free_page((unsigned long) page);
622 out:
623 put_task_struct(task);
624 out_no_task:
625 return copied;
627 #endif
629 static loff_t mem_lseek(struct file * file, loff_t offset, int orig)
631 switch (orig) {
632 case 0:
633 file->f_pos = offset;
634 break;
635 case 1:
636 file->f_pos += offset;
637 break;
638 default:
639 return -EINVAL;
641 force_successful_syscall_return();
642 return file->f_pos;
645 static struct file_operations proc_mem_operations = {
646 .llseek = mem_lseek,
647 .read = mem_read,
648 .write = mem_write,
649 .open = mem_open,
652 static ssize_t oom_adjust_read(struct file *file, char __user *buf,
653 size_t count, loff_t *ppos)
655 struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
656 char buffer[PROC_NUMBUF];
657 size_t len;
658 int oom_adjust;
659 loff_t __ppos = *ppos;
661 if (!task)
662 return -ESRCH;
663 oom_adjust = task->oomkilladj;
664 put_task_struct(task);
666 len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust);
667 if (__ppos >= len)
668 return 0;
669 if (count > len-__ppos)
670 count = len-__ppos;
671 if (copy_to_user(buf, buffer + __ppos, count))
672 return -EFAULT;
673 *ppos = __ppos + count;
674 return count;
677 static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
678 size_t count, loff_t *ppos)
680 struct task_struct *task;
681 char buffer[PROC_NUMBUF], *end;
682 int oom_adjust;
684 if (!capable(CAP_SYS_RESOURCE))
685 return -EPERM;
686 memset(buffer, 0, sizeof(buffer));
687 if (count > sizeof(buffer) - 1)
688 count = sizeof(buffer) - 1;
689 if (copy_from_user(buffer, buf, count))
690 return -EFAULT;
691 oom_adjust = simple_strtol(buffer, &end, 0);
692 if ((oom_adjust < -16 || oom_adjust > 15) && oom_adjust != OOM_DISABLE)
693 return -EINVAL;
694 if (*end == '\n')
695 end++;
696 task = get_proc_task(file->f_dentry->d_inode);
697 if (!task)
698 return -ESRCH;
699 task->oomkilladj = oom_adjust;
700 put_task_struct(task);
701 if (end - buffer == 0)
702 return -EIO;
703 return end - buffer;
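/*
 * Usage sketch (illustrative; the pid is hypothetical and OOM_DISABLE is
 * -17 at the time of writing): a task with CAP_SYS_RESOURCE can exempt
 * a process from the OOM killer with
 *
 *	# echo -17 > /proc/1234/oom_adj
 *
 * while values in [-16, 15] bias the badness() score down or up.
 */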
706 static struct file_operations proc_oom_adjust_operations = {
707 .read = oom_adjust_read,
708 .write = oom_adjust_write,
711 #ifdef CONFIG_AUDITSYSCALL
712 #define TMPBUFLEN 21
713 static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
714 size_t count, loff_t *ppos)
716 struct inode * inode = file->f_dentry->d_inode;
717 struct task_struct *task = get_proc_task(inode);
718 ssize_t length;
719 char tmpbuf[TMPBUFLEN];
721 if (!task)
722 return -ESRCH;
723 length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
724 audit_get_loginuid(task->audit_context));
725 put_task_struct(task);
726 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
729 static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
730 size_t count, loff_t *ppos)
732 struct inode * inode = file->f_dentry->d_inode;
733 char *page, *tmp;
734 ssize_t length;
735 uid_t loginuid;
737 if (!capable(CAP_AUDIT_CONTROL))
738 return -EPERM;
740 if (current != pid_task(proc_pid(inode), PIDTYPE_PID))
741 return -EPERM;
743 if (count >= PAGE_SIZE)
744 count = PAGE_SIZE - 1;
746 if (*ppos != 0) {
747 /* No partial writes. */
748 return -EINVAL;
750 page = (char*)__get_free_page(GFP_USER);
751 if (!page)
752 return -ENOMEM;
753 length = -EFAULT;
754 if (copy_from_user(page, buf, count))
755 goto out_free_page;
757 page[count] = '\0';
758 loginuid = simple_strtoul(page, &tmp, 10);
759 if (tmp == page) {
760 length = -EINVAL;
761 goto out_free_page;
764 length = audit_set_loginuid(current, loginuid);
765 if (likely(length == 0))
766 length = count;
768 out_free_page:
769 free_page((unsigned long) page);
770 return length;
773 static struct file_operations proc_loginuid_operations = {
774 .read = proc_loginuid_read,
775 .write = proc_loginuid_write,
777 #endif
779 #ifdef CONFIG_SECCOMP
780 static ssize_t seccomp_read(struct file *file, char __user *buf,
781 size_t count, loff_t *ppos)
783 struct task_struct *tsk = get_proc_task(file->f_dentry->d_inode);
784 char __buf[20];
785 loff_t __ppos = *ppos;
786 size_t len;
788 if (!tsk)
789 return -ESRCH;
790 /* no need to print the trailing zero, so use only len */
791 len = sprintf(__buf, "%u\n", tsk->seccomp.mode);
792 put_task_struct(tsk);
793 if (__ppos >= len)
794 return 0;
795 if (count > len - __ppos)
796 count = len - __ppos;
797 if (copy_to_user(buf, __buf + __ppos, count))
798 return -EFAULT;
799 *ppos = __ppos + count;
800 return count;
803 static ssize_t seccomp_write(struct file *file, const char __user *buf,
804 size_t count, loff_t *ppos)
806 struct task_struct *tsk = get_proc_task(file->f_dentry->d_inode);
807 char __buf[20], *end;
808 unsigned int seccomp_mode;
809 ssize_t result;
811 result = -ESRCH;
812 if (!tsk)
813 goto out_no_task;
815 /* can set it only once to be even more secure */
816 result = -EPERM;
817 if (unlikely(tsk->seccomp.mode))
818 goto out;
820 result = -EFAULT;
821 memset(__buf, 0, sizeof(__buf));
822 count = min(count, sizeof(__buf) - 1);
823 if (copy_from_user(__buf, buf, count))
824 goto out;
826 seccomp_mode = simple_strtoul(__buf, &end, 0);
827 if (*end == '\n')
828 end++;
829 result = -EINVAL;
830 if (seccomp_mode && seccomp_mode <= NR_SECCOMP_MODES) {
831 tsk->seccomp.mode = seccomp_mode;
832 set_tsk_thread_flag(tsk, TIF_SECCOMP);
833 } else
834 goto out;
835 result = -EIO;
836 if (unlikely(!(end - __buf)))
837 goto out;
838 result = end - __buf;
839 out:
840 put_task_struct(tsk);
841 out_no_task:
842 return result;
845 static struct file_operations proc_seccomp_operations = {
846 .read = seccomp_read,
847 .write = seccomp_write,
849 #endif /* CONFIG_SECCOMP */
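/*
 * Usage sketch (illustrative): a task enables strict seccomp for itself
 * by writing "1" to its own seccomp file, e.g.
 *
 *	fd = open("/proc/self/seccomp", O_WRONLY);
 *	write(fd, "1", 1);
 *
 * Strict mode limits the task to read/write/exit/sigreturn, and the
 * "can set it only once" check above makes any later write fail with
 * -EPERM.
 */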
851 static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
853 struct inode *inode = dentry->d_inode;
854 int error = -EACCES;
856 /* We don't need a base pointer in the /proc filesystem */
857 path_release(nd);
859 /* Are we allowed to snoop on the tasks file descriptors? */
860 if (!proc_fd_access_allowed(inode))
861 goto out;
863 error = PROC_I(inode)->op.proc_get_link(inode, &nd->dentry, &nd->mnt);
864 nd->last_type = LAST_BIND;
865 out:
866 return ERR_PTR(error);
869 static int do_proc_readlink(struct dentry *dentry, struct vfsmount *mnt,
870 char __user *buffer, int buflen)
872 struct inode * inode;
873 char *tmp = (char*)__get_free_page(GFP_KERNEL), *path;
874 int len;
876 if (!tmp)
877 return -ENOMEM;
879 inode = dentry->d_inode;
880 path = d_path(dentry, mnt, tmp, PAGE_SIZE);
881 len = PTR_ERR(path);
882 if (IS_ERR(path))
883 goto out;
884 len = tmp + PAGE_SIZE - 1 - path;
886 if (len > buflen)
887 len = buflen;
888 if (copy_to_user(buffer, path, len))
889 len = -EFAULT;
890 out:
891 free_page((unsigned long)tmp);
892 return len;
895 static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen)
897 int error = -EACCES;
898 struct inode *inode = dentry->d_inode;
899 struct dentry *de;
900 struct vfsmount *mnt = NULL;
902 /* Are we allowed to snoop on the tasks file descriptors? */
903 if (!proc_fd_access_allowed(inode))
904 goto out;
906 error = PROC_I(inode)->op.proc_get_link(inode, &de, &mnt);
907 if (error)
908 goto out;
910 error = do_proc_readlink(de, mnt, buffer, buflen);
911 dput(de);
912 mntput(mnt);
913 out:
914 return error;
917 static struct inode_operations proc_pid_link_inode_operations = {
918 .readlink = proc_pid_readlink,
919 .follow_link = proc_pid_follow_link,
920 .setattr = proc_setattr,
924 /* building an inode */
926 static int task_dumpable(struct task_struct *task)
928 int dumpable = 0;
929 struct mm_struct *mm;
931 task_lock(task);
932 mm = task->mm;
933 if (mm)
934 dumpable = mm->dumpable;
935 task_unlock(task);
936 if(dumpable == 1)
937 return 1;
938 return 0;
942 static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task)
944 struct inode * inode;
945 struct proc_inode *ei;
947 /* We need a new inode */
949 inode = new_inode(sb);
950 if (!inode)
951 goto out;
953 /* Common stuff */
954 ei = PROC_I(inode);
955 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
956 inode->i_op = &proc_def_inode_operations;
959 * grab the reference to task.
961 ei->pid = get_task_pid(task, PIDTYPE_PID);
962 if (!ei->pid)
963 goto out_unlock;
965 inode->i_uid = 0;
966 inode->i_gid = 0;
967 if (task_dumpable(task)) {
968 inode->i_uid = task->euid;
969 inode->i_gid = task->egid;
971 security_task_to_inode(task, inode);
973 out:
974 return inode;
976 out_unlock:
977 iput(inode);
978 return NULL;
981 static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
983 struct inode *inode = dentry->d_inode;
984 struct task_struct *task;
985 generic_fillattr(inode, stat);
987 rcu_read_lock();
988 stat->uid = 0;
989 stat->gid = 0;
990 task = pid_task(proc_pid(inode), PIDTYPE_PID);
991 if (task) {
992 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
993 task_dumpable(task)) {
994 stat->uid = task->euid;
995 stat->gid = task->egid;
998 rcu_read_unlock();
999 return 0;
1002 /* dentry stuff */
1005 * Exceptional case: normally we are not allowed to unhash a busy
1006 * directory. In this case, however, we can do it - no aliasing problems
1007 * due to the way we treat inodes.
1009 * Rewrite the inode's ownerships here because the owning task may have
1010 * performed a setuid(), etc.
1012 * Before the /proc/pid/status file was created the only way to read
1013 * the effective uid of a process was to stat /proc/pid. Reading
1014 * /proc/pid/status is slow enough that procps and other packages
1015 * kept stating /proc/pid. To keep the rules in /proc simple I have
1016 * made this apply to all per process world readable and executable
1017 * directories.
1019 static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
1021 struct inode *inode = dentry->d_inode;
1022 struct task_struct *task = get_proc_task(inode);
1023 if (task) {
1024 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
1025 task_dumpable(task)) {
1026 inode->i_uid = task->euid;
1027 inode->i_gid = task->egid;
1028 } else {
1029 inode->i_uid = 0;
1030 inode->i_gid = 0;
1032 inode->i_mode &= ~(S_ISUID | S_ISGID);
1033 security_task_to_inode(task, inode);
1034 put_task_struct(task);
1035 return 1;
1037 d_drop(dentry);
1038 return 0;
1041 static int pid_delete_dentry(struct dentry * dentry)
1043 /* Is the task we represent dead?
1044 * If so, then don't put the dentry on the lru list,
1045 * kill it immediately.
1047 return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
1050 static struct dentry_operations pid_dentry_operations =
1052 .d_revalidate = pid_revalidate,
1053 .d_delete = pid_delete_dentry,
1056 /* Lookups */
1058 typedef struct dentry *instantiate_t(struct inode *, struct dentry *, struct task_struct *, void *);
1061 * Fill a directory entry.
1063 * If possible create the dcache entry and derive our inode number and
1064 * file type from dcache entry.
1066 * Since all of the proc inode numbers are dynamically generated, the inode
1067 * numbers do not exist until the inode is cached. This means creating
1068 * the dcache entry in readdir is necessary to keep the inode numbers
1069 * reported by readdir in sync with the inode numbers reported
1070 * by stat.
1072 static int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
1073 char *name, int len,
1074 instantiate_t instantiate, struct task_struct *task, void *ptr)
1076 struct dentry *child, *dir = filp->f_dentry;
1077 struct inode *inode;
1078 struct qstr qname;
1079 ino_t ino = 0;
1080 unsigned type = DT_UNKNOWN;
1082 qname.name = name;
1083 qname.len = len;
1084 qname.hash = full_name_hash(name, len);
1086 child = d_lookup(dir, &qname);
1087 if (!child) {
1088 struct dentry *new;
1089 new = d_alloc(dir, &qname);
1090 if (new) {
1091 child = instantiate(dir->d_inode, new, task, ptr);
1092 if (child)
1093 dput(new);
1094 else
1095 child = new;
1098 if (!child || IS_ERR(child) || !child->d_inode)
1099 goto end_instantiate;
1100 inode = child->d_inode;
1101 if (inode) {
1102 ino = inode->i_ino;
1103 type = inode->i_mode >> 12;
1105 dput(child);
1106 end_instantiate:
1107 if (!ino)
1108 ino = find_inode_number(dir, &qname);
1109 if (!ino)
1110 ino = 1;
1111 return filldir(dirent, name, len, filp->f_pos, ino, type);
1114 static unsigned name_to_int(struct dentry *dentry)
1116 const char *name = dentry->d_name.name;
1117 int len = dentry->d_name.len;
1118 unsigned n = 0;
1120 if (len > 1 && *name == '0')
1121 goto out;
1122 while (len-- > 0) {
1123 unsigned c = *name++ - '0';
1124 if (c > 9)
1125 goto out;
1126 if (n >= (~0U-9)/10)
1127 goto out;
1128 n *= 10;
1129 n += c;
1131 return n;
1132 out:
1133 return ~0U;
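/*
 * Worked examples for name_to_int() above (illustrative):
 *
 *	"0"          -> 0
 *	"1234"       -> 1234
 *	"007"        -> ~0U	(leading zero rejected)
 *	"12a"        -> ~0U	(non-digit rejected)
 *	"9999999999" -> ~0U	(would overflow an unsigned int)
 *
 * ~0U is the "not a valid number" value that callers test against.
 */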
1136 static int proc_fd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
1138 struct task_struct *task = get_proc_task(inode);
1139 struct files_struct *files = NULL;
1140 struct file *file;
1141 int fd = proc_fd(inode);
1143 if (task) {
1144 files = get_files_struct(task);
1145 put_task_struct(task);
1147 if (files) {
1149 * We are not taking a ref to the file structure, so we must
1150 * hold ->file_lock.
1152 spin_lock(&files->file_lock);
1153 file = fcheck_files(files, fd);
1154 if (file) {
1155 *mnt = mntget(file->f_vfsmnt);
1156 *dentry = dget(file->f_dentry);
1157 spin_unlock(&files->file_lock);
1158 put_files_struct(files);
1159 return 0;
1161 spin_unlock(&files->file_lock);
1162 put_files_struct(files);
1164 return -ENOENT;
1167 static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
1169 struct inode *inode = dentry->d_inode;
1170 struct task_struct *task = get_proc_task(inode);
1171 int fd = proc_fd(inode);
1172 struct files_struct *files;
1174 if (task) {
1175 files = get_files_struct(task);
1176 if (files) {
1177 rcu_read_lock();
1178 if (fcheck_files(files, fd)) {
1179 rcu_read_unlock();
1180 put_files_struct(files);
1181 if (task_dumpable(task)) {
1182 inode->i_uid = task->euid;
1183 inode->i_gid = task->egid;
1184 } else {
1185 inode->i_uid = 0;
1186 inode->i_gid = 0;
1188 inode->i_mode &= ~(S_ISUID | S_ISGID);
1189 security_task_to_inode(task, inode);
1190 put_task_struct(task);
1191 return 1;
1193 rcu_read_unlock();
1194 put_files_struct(files);
1196 put_task_struct(task);
1198 d_drop(dentry);
1199 return 0;
1202 static struct dentry_operations tid_fd_dentry_operations =
1204 .d_revalidate = tid_fd_revalidate,
1205 .d_delete = pid_delete_dentry,
1208 static struct dentry *proc_fd_instantiate(struct inode *dir,
1209 struct dentry *dentry, struct task_struct *task, void *ptr)
1211 unsigned fd = *(unsigned *)ptr;
1212 struct file *file;
1213 struct files_struct *files;
1214 struct inode *inode;
1215 struct proc_inode *ei;
1216 struct dentry *error = ERR_PTR(-ENOENT);
1218 inode = proc_pid_make_inode(dir->i_sb, task);
1219 if (!inode)
1220 goto out;
1221 ei = PROC_I(inode);
1222 ei->fd = fd;
1223 files = get_files_struct(task);
1224 if (!files)
1225 goto out_iput;
1226 inode->i_mode = S_IFLNK;
1229 * We are not taking a ref to the file structure, so we must
1230 * hold ->file_lock.
1232 spin_lock(&files->file_lock);
1233 file = fcheck_files(files, fd);
1234 if (!file)
1235 goto out_unlock;
1236 if (file->f_mode & 1)	/* FMODE_READ */
1237 inode->i_mode |= S_IRUSR | S_IXUSR;
1238 if (file->f_mode & 2)	/* FMODE_WRITE */
1239 inode->i_mode |= S_IWUSR | S_IXUSR;
1240 spin_unlock(&files->file_lock);
1241 put_files_struct(files);
1243 inode->i_op = &proc_pid_link_inode_operations;
1244 inode->i_size = 64;
1245 ei->op.proc_get_link = proc_fd_link;
1246 dentry->d_op = &tid_fd_dentry_operations;
1247 d_add(dentry, inode);
1248 /* Close the race of the process dying before we return the dentry */
1249 if (tid_fd_revalidate(dentry, NULL))
1250 error = NULL;
1252 out:
1253 return error;
1254 out_unlock:
1255 spin_unlock(&files->file_lock);
1256 put_files_struct(files);
1257 out_iput:
1258 iput(inode);
1259 goto out;
1262 static struct dentry *proc_lookupfd(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
1264 struct task_struct *task = get_proc_task(dir);
1265 unsigned fd = name_to_int(dentry);
1266 struct dentry *result = ERR_PTR(-ENOENT);
1268 if (!task)
1269 goto out_no_task;
1270 if (fd == ~0U)
1271 goto out;
1273 result = proc_fd_instantiate(dir, dentry, task, &fd);
1274 out:
1275 put_task_struct(task);
1276 out_no_task:
1277 return result;
1280 static int proc_fd_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
1281 struct task_struct *task, int fd)
1283 char name[PROC_NUMBUF];
1284 int len = snprintf(name, sizeof(name), "%d", fd);
1285 return proc_fill_cache(filp, dirent, filldir, name, len,
1286 proc_fd_instantiate, task, &fd);
1289 static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir)
1291 struct dentry *dentry = filp->f_dentry;
1292 struct inode *inode = dentry->d_inode;
1293 struct task_struct *p = get_proc_task(inode);
1294 unsigned int fd, tid, ino;
1295 int retval;
1296 struct files_struct * files;
1297 struct fdtable *fdt;
1299 retval = -ENOENT;
1300 if (!p)
1301 goto out_no_task;
1302 retval = 0;
1303 tid = p->pid;
1305 fd = filp->f_pos;
1306 switch (fd) {
1307 case 0:
1308 if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
1309 goto out;
1310 filp->f_pos++;
1311 case 1:
1312 ino = parent_ino(dentry);
1313 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
1314 goto out;
1315 filp->f_pos++;
1316 default:
1317 files = get_files_struct(p);
1318 if (!files)
1319 goto out;
1320 rcu_read_lock();
1321 fdt = files_fdtable(files);
1322 for (fd = filp->f_pos-2;
1323 fd < fdt->max_fds;
1324 fd++, filp->f_pos++) {
1326 if (!fcheck_files(files, fd))
1327 continue;
1328 rcu_read_unlock();
1330 if (proc_fd_fill_cache(filp, dirent, filldir, p, fd) < 0) {
1331 rcu_read_lock();
1332 break;
1334 rcu_read_lock();
1336 rcu_read_unlock();
1337 put_files_struct(files);
1339 out:
1340 put_task_struct(p);
1341 out_no_task:
1342 return retval;
1345 static struct file_operations proc_fd_operations = {
1346 .read = generic_read_dir,
1347 .readdir = proc_readfd,
1351 * proc directories can do almost nothing..
1353 static struct inode_operations proc_fd_inode_operations = {
1354 .lookup = proc_lookupfd,
1355 .setattr = proc_setattr,
1358 static struct dentry *proc_pident_instantiate(struct inode *dir,
1359 struct dentry *dentry, struct task_struct *task, void *ptr)
1361 struct pid_entry *p = ptr;
1362 struct inode *inode;
1363 struct proc_inode *ei;
1364 struct dentry *error = ERR_PTR(-EINVAL);
1366 inode = proc_pid_make_inode(dir->i_sb, task);
1367 if (!inode)
1368 goto out;
1370 ei = PROC_I(inode);
1371 inode->i_mode = p->mode;
1372 if (S_ISDIR(inode->i_mode))
1373 inode->i_nlink = 2; /* Use getattr to fix if necessary */
1374 if (p->iop)
1375 inode->i_op = p->iop;
1376 if (p->fop)
1377 inode->i_fop = p->fop;
1378 ei->op = p->op;
1379 dentry->d_op = &pid_dentry_operations;
1380 d_add(dentry, inode);
1381 /* Close the race of the process dying before we return the dentry */
1382 if (pid_revalidate(dentry, NULL))
1383 error = NULL;
1384 out:
1385 return error;
1388 static struct dentry *proc_pident_lookup(struct inode *dir,
1389 struct dentry *dentry,
1390 struct pid_entry *ents,
1391 unsigned int nents)
1393 struct inode *inode;
1394 struct dentry *error;
1395 struct task_struct *task = get_proc_task(dir);
1396 struct pid_entry *p, *last;
1398 error = ERR_PTR(-ENOENT);
1399 inode = NULL;
1401 if (!task)
1402 goto out_no_task;
1405 * Yes, it does not scale. And it should not. Don't add
1406 * new entries into /proc/<tgid>/ without very good reasons.
1408 last = &ents[nents - 1];
1409 for (p = ents; p <= last; p++) {
1410 if (p->len != dentry->d_name.len)
1411 continue;
1412 if (!memcmp(dentry->d_name.name, p->name, p->len))
1413 break;
1415 if (p > last)
1416 goto out;
1418 error = proc_pident_instantiate(dir, dentry, task, p);
1419 out:
1420 put_task_struct(task);
1421 out_no_task:
1422 return error;
1425 static int proc_pident_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
1426 struct task_struct *task, struct pid_entry *p)
1428 return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
1429 proc_pident_instantiate, task, p);
1432 static int proc_pident_readdir(struct file *filp,
1433 void *dirent, filldir_t filldir,
1434 struct pid_entry *ents, unsigned int nents)
1436 int i;
1437 int pid;
1438 struct dentry *dentry = filp->f_dentry;
1439 struct inode *inode = dentry->d_inode;
1440 struct task_struct *task = get_proc_task(inode);
1441 struct pid_entry *p, *last;
1442 ino_t ino;
1443 int ret;
1445 ret = -ENOENT;
1446 if (!task)
1447 goto out_no_task;
1449 ret = 0;
1450 pid = task->pid;
1451 i = filp->f_pos;
1452 switch (i) {
1453 case 0:
1454 ino = inode->i_ino;
1455 if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
1456 goto out;
1457 i++;
1458 filp->f_pos++;
1459 /* fall through */
1460 case 1:
1461 ino = parent_ino(dentry);
1462 if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
1463 goto out;
1464 i++;
1465 filp->f_pos++;
1466 /* fall through */
1467 default:
1468 i -= 2;
1469 if (i >= nents) {
1470 ret = 1;
1471 goto out;
1473 p = ents + i;
1474 last = &ents[nents - 1];
1475 while (p <= last) {
1476 if (proc_pident_fill_cache(filp, dirent, filldir, task, p) < 0)
1477 goto out;
1478 filp->f_pos++;
1479 p++;
1483 ret = 1;
1484 out:
1485 put_task_struct(task);
1486 out_no_task:
1487 return ret;
1490 #ifdef CONFIG_SECURITY
1491 static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
1492 size_t count, loff_t *ppos)
1494 struct inode * inode = file->f_dentry->d_inode;
1495 unsigned long page;
1496 ssize_t length;
1497 struct task_struct *task = get_proc_task(inode);
1499 length = -ESRCH;
1500 if (!task)
1501 goto out_no_task;
1503 if (count > PAGE_SIZE)
1504 count = PAGE_SIZE;
1505 length = -ENOMEM;
1506 if (!(page = __get_free_page(GFP_KERNEL)))
1507 goto out;
1509 length = security_getprocattr(task,
1510 (char*)file->f_dentry->d_name.name,
1511 (void*)page, count);
1512 if (length >= 0)
1513 length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
1514 free_page(page);
1515 out:
1516 put_task_struct(task);
1517 out_no_task:
1518 return length;
1521 static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
1522 size_t count, loff_t *ppos)
1524 struct inode * inode = file->f_dentry->d_inode;
1525 char *page;
1526 ssize_t length;
1527 struct task_struct *task = get_proc_task(inode);
1529 length = -ESRCH;
1530 if (!task)
1531 goto out_no_task;
1532 if (count > PAGE_SIZE)
1533 count = PAGE_SIZE;
1535 /* No partial writes. */
1536 length = -EINVAL;
1537 if (*ppos != 0)
1538 goto out;
1540 length = -ENOMEM;
1541 page = (char*)__get_free_page(GFP_USER);
1542 if (!page)
1543 goto out;
1545 length = -EFAULT;
1546 if (copy_from_user(page, buf, count))
1547 goto out_free;
1549 length = security_setprocattr(task,
1550 (char*)file->f_dentry->d_name.name,
1551 (void*)page, count);
1552 out_free:
1553 free_page((unsigned long) page);
1554 out:
1555 put_task_struct(task);
1556 out_no_task:
1557 return length;
1560 static struct file_operations proc_pid_attr_operations = {
1561 .read = proc_pid_attr_read,
1562 .write = proc_pid_attr_write,
1565 static struct pid_entry attr_dir_stuff[] = {
1566 REG("current", S_IRUGO|S_IWUGO, pid_attr),
1567 REG("prev", S_IRUGO, pid_attr),
1568 REG("exec", S_IRUGO|S_IWUGO, pid_attr),
1569 REG("fscreate", S_IRUGO|S_IWUGO, pid_attr),
1570 REG("keycreate", S_IRUGO|S_IWUGO, pid_attr),
1571 REG("sockcreate", S_IRUGO|S_IWUGO, pid_attr),
1574 static int proc_attr_dir_readdir(struct file * filp,
1575 void * dirent, filldir_t filldir)
1577 return proc_pident_readdir(filp,dirent,filldir,
1578 attr_dir_stuff,ARRAY_SIZE(attr_dir_stuff));
1581 static struct file_operations proc_attr_dir_operations = {
1582 .read = generic_read_dir,
1583 .readdir = proc_attr_dir_readdir,
1586 static struct dentry *proc_attr_dir_lookup(struct inode *dir,
1587 struct dentry *dentry, struct nameidata *nd)
1589 return proc_pident_lookup(dir, dentry,
1590 attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
1593 static struct inode_operations proc_attr_dir_inode_operations = {
1594 .lookup = proc_attr_dir_lookup,
1595 .getattr = pid_getattr,
1596 .setattr = proc_setattr,
1599 #endif
1602 * /proc/self:
1604 static int proc_self_readlink(struct dentry *dentry, char __user *buffer,
1605 int buflen)
1607 char tmp[PROC_NUMBUF];
1608 sprintf(tmp, "%d", current->tgid);
1609 return vfs_readlink(dentry,buffer,buflen,tmp);
1612 static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
1614 char tmp[PROC_NUMBUF];
1615 sprintf(tmp, "%d", current->tgid);
1616 return ERR_PTR(vfs_follow_link(nd,tmp));
1619 static struct inode_operations proc_self_inode_operations = {
1620 .readlink = proc_self_readlink,
1621 .follow_link = proc_self_follow_link,
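/*
 * Illustrative only: /proc/self resolves to the tgid directory of
 * whichever task performs the lookup, e.g.
 *
 *	$ readlink /proc/self
 *	1234		(readlink's own tgid)
 */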
1625 * proc base
1627 * These are the directory entries in the root directory of /proc
1628 * that properly belong to the /proc filesystem, as they describe
1629 * something that is process related.
1631 static struct pid_entry proc_base_stuff[] = {
1632 NOD("self", S_IFLNK|S_IRWXUGO,
1633 &proc_self_inode_operations, NULL, {}),
1637 * Exceptional case: normally we are not allowed to unhash a busy
1638 * directory. In this case, however, we can do it - no aliasing problems
1639 * due to the way we treat inodes.
1641 static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
1643 struct inode *inode = dentry->d_inode;
1644 struct task_struct *task = get_proc_task(inode);
1645 if (task) {
1646 put_task_struct(task);
1647 return 1;
1649 d_drop(dentry);
1650 return 0;
1653 static struct dentry_operations proc_base_dentry_operations =
1655 .d_revalidate = proc_base_revalidate,
1656 .d_delete = pid_delete_dentry,
1659 static struct dentry *proc_base_instantiate(struct inode *dir,
1660 struct dentry *dentry, struct task_struct *task, void *ptr)
1662 struct pid_entry *p = ptr;
1663 struct inode *inode;
1664 struct proc_inode *ei;
1665 struct dentry *error = ERR_PTR(-EINVAL);
1667 /* Allocate the inode */
1668 error = ERR_PTR(-ENOMEM);
1669 inode = new_inode(dir->i_sb);
1670 if (!inode)
1671 goto out;
1673 /* Initialize the inode */
1674 ei = PROC_I(inode);
1675 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1678 * grab the reference to the task.
1680 ei->pid = get_task_pid(task, PIDTYPE_PID);
1681 if (!ei->pid)
1682 goto out_iput;
1684 inode->i_uid = 0;
1685 inode->i_gid = 0;
1686 inode->i_mode = p->mode;
1687 if (S_ISDIR(inode->i_mode))
1688 inode->i_nlink = 2;
1689 if (S_ISLNK(inode->i_mode))
1690 inode->i_size = 64;
1691 if (p->iop)
1692 inode->i_op = p->iop;
1693 if (p->fop)
1694 inode->i_fop = p->fop;
1695 ei->op = p->op;
1696 dentry->d_op = &proc_base_dentry_operations;
1697 d_add(dentry, inode);
1698 error = NULL;
1699 out:
1700 return error;
1701 out_iput:
1702 iput(inode);
1703 goto out;
1706 static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry)
1708 struct dentry *error;
1709 struct task_struct *task = get_proc_task(dir);
1710 struct pid_entry *p, *last;
1712 error = ERR_PTR(-ENOENT);
1714 if (!task)
1715 goto out_no_task;
1717 /* Lookup the directory entry */
1718 last = &proc_base_stuff[ARRAY_SIZE(proc_base_stuff) - 1];
1719 for (p = proc_base_stuff; p <= last; p++) {
1720 if (p->len != dentry->d_name.len)
1721 continue;
1722 if (!memcmp(dentry->d_name.name, p->name, p->len))
1723 break;
1725 if (p > last)
1726 goto out;
1728 error = proc_base_instantiate(dir, dentry, task, p);
1730 out:
1731 put_task_struct(task);
1732 out_no_task:
1733 return error;
1736 static int proc_base_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
1737 struct task_struct *task, struct pid_entry *p)
1739 return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
1740 proc_base_instantiate, task, p);
1744 * Thread groups
1746 static struct file_operations proc_task_operations;
1747 static struct inode_operations proc_task_inode_operations;
1749 static struct pid_entry tgid_base_stuff[] = {
1750 DIR("task", S_IRUGO|S_IXUGO, task),
1751 DIR("fd", S_IRUSR|S_IXUSR, fd),
1752 INF("environ", S_IRUSR, pid_environ),
1753 INF("auxv", S_IRUSR, pid_auxv),
1754 INF("status", S_IRUGO, pid_status),
1755 INF("cmdline", S_IRUGO, pid_cmdline),
1756 INF("stat", S_IRUGO, tgid_stat),
1757 INF("statm", S_IRUGO, pid_statm),
1758 REG("maps", S_IRUGO, maps),
1759 #ifdef CONFIG_NUMA
1760 REG("numa_maps", S_IRUGO, numa_maps),
1761 #endif
1762 REG("mem", S_IRUSR|S_IWUSR, mem),
1763 #ifdef CONFIG_SECCOMP
1764 REG("seccomp", S_IRUSR|S_IWUSR, seccomp),
1765 #endif
1766 LNK("cwd", cwd),
1767 LNK("root", root),
1768 LNK("exe", exe),
1769 REG("mounts", S_IRUGO, mounts),
1770 REG("mountstats", S_IRUSR, mountstats),
1771 #ifdef CONFIG_MMU
1772 REG("smaps", S_IRUGO, smaps),
1773 #endif
1774 #ifdef CONFIG_SECURITY
1775 DIR("attr", S_IRUGO|S_IXUGO, attr_dir),
1776 #endif
1777 #ifdef CONFIG_KALLSYMS
1778 INF("wchan", S_IRUGO, pid_wchan),
1779 #endif
1780 #ifdef CONFIG_SCHEDSTATS
1781 INF("schedstat", S_IRUGO, pid_schedstat),
1782 #endif
1783 #ifdef CONFIG_CPUSETS
1784 REG("cpuset", S_IRUGO, cpuset),
1785 #endif
1786 INF("oom_score", S_IRUGO, oom_score),
1787 REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
1788 #ifdef CONFIG_AUDITSYSCALL
1789 REG("loginuid", S_IWUSR|S_IRUGO, loginuid),
1790 #endif
1793 static int proc_tgid_base_readdir(struct file * filp,
1794 void * dirent, filldir_t filldir)
1796 return proc_pident_readdir(filp,dirent,filldir,
1797 tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff));
1800 static struct file_operations proc_tgid_base_operations = {
1801 .read = generic_read_dir,
1802 .readdir = proc_tgid_base_readdir,
1805 static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
1806 return proc_pident_lookup(dir, dentry,
1807 tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
1810 static struct inode_operations proc_tgid_base_inode_operations = {
1811 .lookup = proc_tgid_base_lookup,
1812 .getattr = pid_getattr,
1813 .setattr = proc_setattr,
1817 * proc_flush_task - Remove dcache entries for @task from the /proc dcache.
1819 * @task: task that should be flushed.
1821 * Looks in the dcache for
1822 * /proc/@pid
1823 * /proc/@tgid/task/@pid
1824 * if either directory is present, flushes it and all of its children
1825 * from the dcache.
1827 * It is safe and reasonable to cache /proc entries for a task until
1828 * that task exits. After that they just clog up the dcache with
1829 * useless entries, possibly causing useful dcache entries to be
1830 * flushed instead. This routine is provided to flush those useless
1831 * dcache entries at process exit time.
1833 * NOTE: This routine is just an optimization so it does not guarantee
1834 * that no dcache entries will exist at process exit time; it
1835 * just makes it very unlikely that any will persist.
1837 void proc_flush_task(struct task_struct *task)
1839 struct dentry *dentry, *leader, *dir;
1840 char buf[PROC_NUMBUF];
1841 struct qstr name;
1843 name.name = buf;
1844 name.len = snprintf(buf, sizeof(buf), "%d", task->pid);
1845 dentry = d_hash_and_lookup(proc_mnt->mnt_root, &name);
1846 if (dentry) {
1847 shrink_dcache_parent(dentry);
1848 d_drop(dentry);
1849 dput(dentry);
1852 if (thread_group_leader(task))
1853 goto out;
1855 name.name = buf;
1856 name.len = snprintf(buf, sizeof(buf), "%d", task->tgid);
1857 leader = d_hash_and_lookup(proc_mnt->mnt_root, &name);
1858 if (!leader)
1859 goto out;
1861 name.name = "task";
1862 name.len = strlen(name.name);
1863 dir = d_hash_and_lookup(leader, &name);
1864 if (!dir)
1865 goto out_put_leader;
1867 name.name = buf;
1868 name.len = snprintf(buf, sizeof(buf), "%d", task->pid);
1869 dentry = d_hash_and_lookup(dir, &name);
1870 if (dentry) {
1871 shrink_dcache_parent(dentry);
1872 d_drop(dentry);
1873 dput(dentry);
1876 dput(dir);
1877 out_put_leader:
1878 dput(leader);
1879 out:
1880 return;
1883 struct dentry *proc_pid_instantiate(struct inode *dir,
1884 struct dentry * dentry, struct task_struct *task, void *ptr)
1886 struct dentry *error = ERR_PTR(-ENOENT);
1887 struct inode *inode;
1889 inode = proc_pid_make_inode(dir->i_sb, task);
1890 if (!inode)
1891 goto out;
1893 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
1894 inode->i_op = &proc_tgid_base_inode_operations;
1895 inode->i_fop = &proc_tgid_base_operations;
1896 inode->i_flags|=S_IMMUTABLE;
1897 inode->i_nlink = 4;
1898 #ifdef CONFIG_SECURITY
1899 inode->i_nlink += 1;
1900 #endif
1902 dentry->d_op = &pid_dentry_operations;
1904 d_add(dentry, inode);
1905 /* Close the race of the process dying before we return the dentry */
1906 if (pid_revalidate(dentry, NULL))
1907 error = NULL;
1908 out:
1909 return error;
1912 struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
1914 struct dentry *result = ERR_PTR(-ENOENT);
1915 struct task_struct *task;
1916 unsigned tgid;
1918 result = proc_base_lookup(dir, dentry);
1919 if (!IS_ERR(result) || PTR_ERR(result) != -ENOENT)
1920 goto out;
1922 tgid = name_to_int(dentry);
1923 if (tgid == ~0U)
1924 goto out;
1926 rcu_read_lock();
1927 task = find_task_by_pid(tgid);
1928 if (task)
1929 get_task_struct(task);
1930 rcu_read_unlock();
1931 if (!task)
1932 goto out;
1934 result = proc_pid_instantiate(dir, dentry, task, NULL);
1935 put_task_struct(task);
1936 out:
1937 return result;
1941 * Find the first task with tgid >= tgid
1944 static struct task_struct *next_tgid(unsigned int tgid)
1946 struct task_struct *task;
1947 struct pid *pid;
1949 rcu_read_lock();
1950 retry:
1951 task = NULL;
1952 pid = find_ge_pid(tgid);
1953 if (pid) {
1954 tgid = pid->nr + 1;
1955 task = pid_task(pid, PIDTYPE_PID);
1956 /* What we want to know is whether the pid we have found is the
1957 * pid of a thread_group_leader. Testing for the task
1958 * being a thread_group_leader is the obvious thing
1959 * to do, but there is a window when it fails, due to
1960 * the pid transfer logic in de_thread.
1962 * So we perform the straightforward test of seeing
1963 * if the pid we have found is the pid of a thread
1964 * group leader, and don't worry if the task we have
1965 * found doesn't happen to be a thread group leader.
1966 * As we don't care in the case of readdir.
1968 if (!task || !has_group_leader_pid(task))
1969 goto retry;
1970 get_task_struct(task);
1972 rcu_read_unlock();
1973 return task;
1976 #define TGID_OFFSET (FIRST_PROCESS_ENTRY + ARRAY_SIZE(proc_base_stuff))
1978 static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
1979 struct task_struct *task, int tgid)
1981 char name[PROC_NUMBUF];
1982 int len = snprintf(name, sizeof(name), "%d", tgid);
1983 return proc_fill_cache(filp, dirent, filldir, name, len,
1984 proc_pid_instantiate, task, NULL);
1987 /* for the /proc/ directory itself, after non-process stuff has been done */
1988 int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
1990 unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
1991 struct task_struct *reaper = get_proc_task(filp->f_dentry->d_inode);
1992 struct task_struct *task;
1993 int tgid;
1995 if (!reaper)
1996 goto out_no_task;
1998 for (; nr < ARRAY_SIZE(proc_base_stuff); filp->f_pos++, nr++) {
1999 struct pid_entry *p = &proc_base_stuff[nr];
2000 if (proc_base_fill_cache(filp, dirent, filldir, reaper, p) < 0)
2001 goto out;
2004 tgid = filp->f_pos - TGID_OFFSET;
2005 for (task = next_tgid(tgid);
2006 task;
2007 put_task_struct(task), task = next_tgid(tgid + 1)) {
2008 tgid = task->pid;
2009 filp->f_pos = tgid + TGID_OFFSET;
2010 if (proc_pid_fill_cache(filp, dirent, filldir, task, tgid) < 0) {
2011 put_task_struct(task);
2012 goto out;
2015 filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET;
2016 out:
2017 put_task_struct(reaper);
2018 out_no_task:
2019 return 0;
2023 * Tasks
2025 static struct pid_entry tid_base_stuff[] = {
2026 DIR("fd", S_IRUSR|S_IXUSR, fd),
2027 INF("environ", S_IRUSR, pid_environ),
2028 INF("auxv", S_IRUSR, pid_auxv),
2029 INF("status", S_IRUGO, pid_status),
2030 INF("cmdline", S_IRUGO, pid_cmdline),
2031 INF("stat", S_IRUGO, tid_stat),
2032 INF("statm", S_IRUGO, pid_statm),
2033 REG("maps", S_IRUGO, maps),
2034 #ifdef CONFIG_NUMA
2035 REG("numa_maps", S_IRUGO, numa_maps),
2036 #endif
2037 REG("mem", S_IRUSR|S_IWUSR, mem),
2038 #ifdef CONFIG_SECCOMP
2039 REG("seccomp", S_IRUSR|S_IWUSR, seccomp),
2040 #endif
2041 LNK("cwd", cwd),
2042 LNK("root", root),
2043 LNK("exe", exe),
2044 REG("mounts", S_IRUGO, mounts),
2045 #ifdef CONFIG_MMU
2046 REG("smaps", S_IRUGO, smaps),
2047 #endif
2048 #ifdef CONFIG_SECURITY
2049 DIR("attr", S_IRUGO|S_IXUGO, attr_dir),
2050 #endif
2051 #ifdef CONFIG_KALLSYMS
2052 INF("wchan", S_IRUGO, pid_wchan),
2053 #endif
2054 #ifdef CONFIG_SCHEDSTATS
2055 INF("schedstat", S_IRUGO, pid_schedstat),
2056 #endif
2057 #ifdef CONFIG_CPUSETS
2058 REG("cpuset", S_IRUGO, cpuset),
2059 #endif
2060 INF("oom_score", S_IRUGO, oom_score),
2061 REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
2062 #ifdef CONFIG_AUDITSYSCALL
2063 REG("loginuid", S_IWUSR|S_IRUGO, loginuid),
2064 #endif
2067 static int proc_tid_base_readdir(struct file * filp,
2068 void * dirent, filldir_t filldir)
2070 return proc_pident_readdir(filp,dirent,filldir,
2071 tid_base_stuff,ARRAY_SIZE(tid_base_stuff));
2074 static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
2075 return proc_pident_lookup(dir, dentry,
2076 tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
2079 static struct file_operations proc_tid_base_operations = {
2080 .read = generic_read_dir,
2081 .readdir = proc_tid_base_readdir,
2084 static struct inode_operations proc_tid_base_inode_operations = {
2085 .lookup = proc_tid_base_lookup,
2086 .getattr = pid_getattr,
2087 .setattr = proc_setattr,
2090 static struct dentry *proc_task_instantiate(struct inode *dir,
2091 struct dentry *dentry, struct task_struct *task, void *ptr)
2093 struct dentry *error = ERR_PTR(-ENOENT);
2094 struct inode *inode;
2095 inode = proc_pid_make_inode(dir->i_sb, task);
2097 if (!inode)
2098 goto out;
2099 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
2100 inode->i_op = &proc_tid_base_inode_operations;
2101 inode->i_fop = &proc_tid_base_operations;
2102 inode->i_flags|=S_IMMUTABLE;
2103 inode->i_nlink = 3;
2104 #ifdef CONFIG_SECURITY
2105 inode->i_nlink += 1;
2106 #endif
2108 dentry->d_op = &pid_dentry_operations;
2110 d_add(dentry, inode);
2111 /* Close the race of the process dying before we return the dentry */
2112 if (pid_revalidate(dentry, NULL))
2113 error = NULL;
2114 out:
2115 return error;
2118 static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
2120 struct dentry *result = ERR_PTR(-ENOENT);
2121 struct task_struct *task;
2122 struct task_struct *leader = get_proc_task(dir);
2123 unsigned tid;
2125 if (!leader)
2126 goto out_no_task;
2128 tid = name_to_int(dentry);
2129 if (tid == ~0U)
2130 goto out;
2132 rcu_read_lock();
2133 task = find_task_by_pid(tid);
2134 if (task)
2135 get_task_struct(task);
2136 rcu_read_unlock();
2137 if (!task)
2138 goto out;
2139 if (leader->tgid != task->tgid)
2140 goto out_drop_task;
2142 result = proc_task_instantiate(dir, dentry, task, NULL);
2143 out_drop_task:
2144 put_task_struct(task);
2145 out:
2146 put_task_struct(leader);
2147 out_no_task:
2148 return result;
2152 * Find the first tid of a thread group to return to user space.
2154 * Usually this is just the thread group leader, but if the user's
2155 * buffer was too small or there was a seek into the middle of the
2156 * directory, we have more work to do.
2158 * In the case of a short read we start with find_task_by_pid.
2160 * In the case of a seek we start with the leader and walk nr
2161 * threads past it.
2163 static struct task_struct *first_tid(struct task_struct *leader,
2164 int tid, int nr)
2166 struct task_struct *pos;
2168 rcu_read_lock();
2169 /* Attempt to start with the pid of a thread */
2170 if (tid && (nr > 0)) {
2171 pos = find_task_by_pid(tid);
2172 if (pos && (pos->group_leader == leader))
2173 goto found;
2176 /* If nr exceeds the number of threads there is nothing to do */
2177 pos = NULL;
2178 if (nr && nr >= get_nr_threads(leader))
2179 goto out;
2181 /* If we haven't found our starting place yet start
2182 * with the leader and walk nr threads forward.
2184 for (pos = leader; nr > 0; --nr) {
2185 pos = next_thread(pos);
2186 if (pos == leader) {
2187 pos = NULL;
2188 goto out;
2191 found:
2192 get_task_struct(pos);
2193 out:
2194 rcu_read_unlock();
2195 return pos;
2199 * Find the next thread in the thread list.
2200 * Return NULL if there is an error or no next thread.
2202 * The reference to the input task_struct is released.
2204 static struct task_struct *next_tid(struct task_struct *start)
2206 struct task_struct *pos = NULL;
2207 rcu_read_lock();
2208 if (pid_alive(start)) {
2209 pos = next_thread(start);
2210 if (thread_group_leader(pos))
2211 pos = NULL;
2212 else
2213 get_task_struct(pos);
2215 rcu_read_unlock();
2216 put_task_struct(start);
2217 return pos;
2220 static int proc_task_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
2221 struct task_struct *task, int tid)
2223 char name[PROC_NUMBUF];
2224 int len = snprintf(name, sizeof(name), "%d", tid);
2225 return proc_fill_cache(filp, dirent, filldir, name, len,
2226 proc_task_instantiate, task, NULL);
2229 /* for the /proc/TGID/task/ directories */
2230 static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir)
2232 struct dentry *dentry = filp->f_dentry;
2233 struct inode *inode = dentry->d_inode;
2234 struct task_struct *leader = get_proc_task(inode);
2235 struct task_struct *task;
2236 int retval = -ENOENT;
2237 ino_t ino;
2238 int tid;
2239 unsigned long pos = filp->f_pos; /* avoiding "long long" filp->f_pos */
2241 if (!leader)
2242 goto out_no_task;
2243 retval = 0;
2245 switch (pos) {
2246 case 0:
2247 ino = inode->i_ino;
2248 if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
2249 goto out;
2250 pos++;
2251 /* fall through */
2252 case 1:
2253 ino = parent_ino(dentry);
2254 if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
2255 goto out;
2256 pos++;
2257 /* fall through */
2260 /* f_version caches the tid value that the last readdir call couldn't
2261 * return. lseek aka telldir automagically resets f_version to 0.
2263 tid = filp->f_version;
2264 filp->f_version = 0;
2265 for (task = first_tid(leader, tid, pos - 2);
2266 task;
2267 task = next_tid(task), pos++) {
2268 tid = task->pid;
2269 if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) {
2270 /* returning this tid failed, save it as the first
2271 * tid for the next readdir call */
2272 filp->f_version = tid;
2273 put_task_struct(task);
2274 break;
2277 out:
2278 filp->f_pos = pos;
2279 put_task_struct(leader);
2280 out_no_task:
2281 return retval;
2284 static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
2286 struct inode *inode = dentry->d_inode;
2287 struct task_struct *p = get_proc_task(inode);
2288 generic_fillattr(inode, stat);
2290 if (p) {
2291 rcu_read_lock();
2292 stat->nlink += get_nr_threads(p);
2293 rcu_read_unlock();
2294 put_task_struct(p);
2297 return 0;
2300 static struct inode_operations proc_task_inode_operations = {
2301 .lookup = proc_task_lookup,
2302 .getattr = proc_task_getattr,
2303 .setattr = proc_setattr,
2306 static struct file_operations proc_task_operations = {
2307 .read = generic_read_dir,
2308 .readdir = proc_task_readdir,