1 /*
2 * linux/fs/proc/base.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * proc base directory handling functions
8 * 1999, Al Viro. Rewritten. Now it covers the whole per-process part.
9 * Instead of using magical inumbers to determine the kind of object
10 * we allocate and fill in-core inodes upon lookup. They don't even
11 * go into icache. We cache the reference to task_struct upon lookup too.
12 * Eventually it should become a filesystem in its own right. We don't use the
13 * rest of procfs anymore.
16 * Changelog:
17 * 17-Jan-2005
18 * Allan Bezerra
19 * Bruna Moreira <bruna.moreira@indt.org.br>
20 * Edjard Mota <edjard.mota@indt.org.br>
21 * Ilias Biris <ilias.biris@indt.org.br>
22 * Mauricio Lin <mauricio.lin@indt.org.br>
24 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
26 * A new process-specific entry (smaps) is included in /proc. It shows the
27 * size of rss for each memory area. The maps entry lacks information
28 * about physical memory size (rss) for each mapped file, i.e.,
29 * rss information for executables and library files.
30 * This additional information is useful for any tools that need to know
31 * about physical memory consumption for a process-specific library.
33 * Changelog:
34 * 21-Feb-2005
35 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
36 * Pud inclusion in the page table walking.
38 * ChangeLog:
39 * 10-Mar-2005
40 * 10LE Instituto Nokia de Tecnologia - INdT:
41 * A better way to walk through the page table, as suggested by Hugh Dickins.
43 * Simo Piiroinen <simo.piiroinen@nokia.com>:
44 * Smaps information related to shared, private, clean and dirty pages.
46 * Paul Mundt <paul.mundt@nokia.com>:
47 * Overall revision about smaps.
50 #include <asm/uaccess.h>
52 #include <linux/errno.h>
53 #include <linux/time.h>
54 #include <linux/proc_fs.h>
55 #include <linux/stat.h>
56 #include <linux/init.h>
57 #include <linux/capability.h>
58 #include <linux/file.h>
59 #include <linux/string.h>
60 #include <linux/seq_file.h>
61 #include <linux/namei.h>
62 #include <linux/mnt_namespace.h>
63 #include <linux/mm.h>
64 #include <linux/smp_lock.h>
65 #include <linux/rcupdate.h>
66 #include <linux/kallsyms.h>
67 #include <linux/mount.h>
68 #include <linux/security.h>
69 #include <linux/ptrace.h>
70 #include <linux/seccomp.h>
71 #include <linux/cpuset.h>
72 #include <linux/audit.h>
73 #include <linux/poll.h>
74 #include <linux/nsproxy.h>
75 #include <linux/oom.h>
76 #include "internal.h"
78 /* NOTE:
79 * Implementing inode permission operations in /proc is almost
80 * certainly an error. Permission checks need to happen during
81 * each system call, not at open time. The reason is that most of
82 * what we wish to check for permissions in /proc varies at runtime.
84 * The classic example of a problem is opening file descriptors
85 * in /proc for a task before it execs a suid executable.
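/*
 * A minimal userspace sketch of the race described above (illustrative
 * only, not part of this file; the suid binary path is an assumption):
 * if permissions were checked only at open(2) time, the descriptor
 * obtained here before the exec would keep granting access afterwards,
 * which is exactly why each system call must re-check.
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		sleep(1);				// let the parent open first
 *		execl("/usr/bin/passwd", "passwd", (char *)NULL);
 *		_exit(1);
 *	}
 *	char path[32];
 *	snprintf(path, sizeof(path), "/proc/%d/mem", pid);
 *	int fd = open(path, O_RDONLY);		// task is still unprivileged here
 *	// ... later reads through fd would hit the suid image unless re-checked ...
 */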
89 /* Worst case buffer size needed for holding an integer. */
90 #define PROC_NUMBUF 13
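/*
 * A worked sizing example (not from the original source): the widest
 * string formatted through such a buffer is a negative 32-bit value
 * plus a newline and the terminating NUL.
 *
 *	char buffer[PROC_NUMBUF];
 *	snprintf(buffer, sizeof(buffer), "%i\n", INT_MIN);
 *	// "-2147483648\n" = 1 sign + 10 digits + '\n' + '\0' = 13 bytes
 */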
92 struct pid_entry {
93 int len;
94 char *name;
95 mode_t mode;
96 struct inode_operations *iop;
97 struct file_operations *fop;
98 union proc_op op;
101 #define NOD(NAME, MODE, IOP, FOP, OP) { \
102 .len = sizeof(NAME) - 1, \
103 .name = (NAME), \
104 .mode = MODE, \
105 .iop = IOP, \
106 .fop = FOP, \
107 .op = OP, \
110 #define DIR(NAME, MODE, OTYPE) \
111 NOD(NAME, (S_IFDIR|(MODE)), \
112 &proc_##OTYPE##_inode_operations, &proc_##OTYPE##_operations, \
113 {} )
114 #define LNK(NAME, OTYPE) \
115 NOD(NAME, (S_IFLNK|S_IRWXUGO), \
116 &proc_pid_link_inode_operations, NULL, \
117 { .proc_get_link = &proc_##OTYPE##_link } )
118 #define REG(NAME, MODE, OTYPE) \
119 NOD(NAME, (S_IFREG|(MODE)), NULL, \
120 &proc_##OTYPE##_operations, {})
121 #define INF(NAME, MODE, OTYPE) \
122 NOD(NAME, (S_IFREG|(MODE)), \
123 NULL, &proc_info_file_operations, \
124 { .proc_read = &proc_##OTYPE } )
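/*
 * For reference, a sketch of what one table entry expands to, taking
 * INF("cmdline", S_IRUGO, pid_cmdline) from the tables further down:
 *
 *	{
 *		.len  = sizeof("cmdline") - 1,
 *		.name = "cmdline",
 *		.mode = S_IFREG | S_IRUGO,
 *		.iop  = NULL,
 *		.fop  = &proc_info_file_operations,
 *		.op   = { .proc_read = &proc_pid_cmdline },
 *	}
 *
 * so every INF() file is read through proc_info_file_operations, which
 * dispatches to the named proc_read helper.
 */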
126 static struct fs_struct *get_fs_struct(struct task_struct *task)
128 struct fs_struct *fs;
129 task_lock(task);
130 fs = task->fs;
131 if(fs)
132 atomic_inc(&fs->count);
133 task_unlock(task);
134 return fs;
137 static int get_nr_threads(struct task_struct *tsk)
139 /* Must be called with the rcu_read_lock held */
140 unsigned long flags;
141 int count = 0;
143 if (lock_task_sighand(tsk, &flags)) {
144 count = atomic_read(&tsk->signal->count);
145 unlock_task_sighand(tsk, &flags);
147 return count;
150 static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
152 struct task_struct *task = get_proc_task(inode);
153 struct fs_struct *fs = NULL;
154 int result = -ENOENT;
156 if (task) {
157 fs = get_fs_struct(task);
158 put_task_struct(task);
160 if (fs) {
161 read_lock(&fs->lock);
162 *mnt = mntget(fs->pwdmnt);
163 *dentry = dget(fs->pwd);
164 read_unlock(&fs->lock);
165 result = 0;
166 put_fs_struct(fs);
168 return result;
171 static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
173 struct task_struct *task = get_proc_task(inode);
174 struct fs_struct *fs = NULL;
175 int result = -ENOENT;
177 if (task) {
178 fs = get_fs_struct(task);
179 put_task_struct(task);
181 if (fs) {
182 read_lock(&fs->lock);
183 *mnt = mntget(fs->rootmnt);
184 *dentry = dget(fs->root);
185 read_unlock(&fs->lock);
186 result = 0;
187 put_fs_struct(fs);
189 return result;
192 #define MAY_PTRACE(task) \
193 (task == current || \
194 (task->parent == current && \
195 (task->ptrace & PT_PTRACED) && \
196 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
197 security_ptrace(current,task) == 0))
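/*
 * Typical call pattern (as used by mem_read() and mem_write() below):
 * the macro is combined with the LSM-aware helper, and both must agree
 * before the caller may touch the target's memory.
 *
 *	if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
 *		goto out;
 */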
199 static int proc_pid_environ(struct task_struct *task, char * buffer)
201 int res = 0;
202 struct mm_struct *mm = get_task_mm(task);
203 if (mm) {
204 unsigned int len = mm->env_end - mm->env_start;
205 if (len > PAGE_SIZE)
206 len = PAGE_SIZE;
207 res = access_process_vm(task, mm->env_start, buffer, len, 0);
208 if (!ptrace_may_attach(task))
209 res = -ESRCH;
210 mmput(mm);
212 return res;
215 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
217 int res = 0;
218 unsigned int len;
219 struct mm_struct *mm = get_task_mm(task);
220 if (!mm)
221 goto out;
222 if (!mm->arg_end)
223 goto out_mm; /* Shh! No looking before we're done */
225 len = mm->arg_end - mm->arg_start;
227 if (len > PAGE_SIZE)
228 len = PAGE_SIZE;
230 res = access_process_vm(task, mm->arg_start, buffer, len, 0);
232 // If the nul at the end of args has been overwritten, then
233 // assume application is using setproctitle(3).
234 if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
235 len = strnlen(buffer, res);
236 if (len < res) {
237 res = len;
238 } else {
239 len = mm->env_end - mm->env_start;
240 if (len > PAGE_SIZE - res)
241 len = PAGE_SIZE - res;
242 res += access_process_vm(task, mm->env_start, buffer+res, len, 0);
243 res = strnlen(buffer, res);
246 out_mm:
247 mmput(mm);
248 out:
249 return res;
252 static int proc_pid_auxv(struct task_struct *task, char *buffer)
254 int res = 0;
255 struct mm_struct *mm = get_task_mm(task);
256 if (mm) {
257 unsigned int nwords = 0;
258 do {
259 nwords += 2;
260 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
261 res = nwords * sizeof(mm->saved_auxv[0]);
262 if (res > PAGE_SIZE)
263 res = PAGE_SIZE;
264 memcpy(buffer, mm->saved_auxv, res);
265 mmput(mm);
267 return res;
271 #ifdef CONFIG_KALLSYMS
273 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
274 * Returns the resolved symbol. If that fails, simply return the address.
276 static int proc_pid_wchan(struct task_struct *task, char *buffer)
278 char *modname;
279 const char *sym_name;
280 unsigned long wchan, size, offset;
281 char namebuf[KSYM_NAME_LEN+1];
283 wchan = get_wchan(task);
285 sym_name = kallsyms_lookup(wchan, &size, &offset, &modname, namebuf);
286 if (sym_name)
287 return sprintf(buffer, "%s", sym_name);
288 return sprintf(buffer, "%lu", wchan);
290 #endif /* CONFIG_KALLSYMS */
292 #ifdef CONFIG_SCHEDSTATS
294 * Provides /proc/PID/schedstat
296 static int proc_pid_schedstat(struct task_struct *task, char *buffer)
298 return sprintf(buffer, "%lu %lu %lu\n",
299 task->sched_info.cpu_time,
300 task->sched_info.run_delay,
301 task->sched_info.pcnt);
303 #endif
305 /* The badness from the OOM killer */
306 unsigned long badness(struct task_struct *p, unsigned long uptime);
307 static int proc_oom_score(struct task_struct *task, char *buffer)
309 unsigned long points;
310 struct timespec uptime;
312 do_posix_clock_monotonic_gettime(&uptime);
313 points = badness(task, uptime.tv_sec);
314 return sprintf(buffer, "%lu\n", points);
317 /************************************************************************/
318 /* Here the fs part begins */
319 /************************************************************************/
321 /* permission checks */
322 static int proc_fd_access_allowed(struct inode *inode)
324 struct task_struct *task;
325 int allowed = 0;
326 /* Allow access to a task's file descriptors if it is us, or if we
327 * may ptrace-attach to the process and find out that
328 * information.
330 task = get_proc_task(inode);
331 if (task) {
332 allowed = ptrace_may_attach(task);
333 put_task_struct(task);
335 return allowed;
338 static int proc_setattr(struct dentry *dentry, struct iattr *attr)
340 int error;
341 struct inode *inode = dentry->d_inode;
343 if (attr->ia_valid & ATTR_MODE)
344 return -EPERM;
346 error = inode_change_ok(inode, attr);
347 if (!error) {
348 error = security_inode_setattr(dentry, attr);
349 if (!error)
350 error = inode_setattr(inode, attr);
352 return error;
355 static struct inode_operations proc_def_inode_operations = {
356 .setattr = proc_setattr,
359 extern struct seq_operations mounts_op;
360 struct proc_mounts {
361 struct seq_file m;
362 int event;
365 static int mounts_open(struct inode *inode, struct file *file)
367 struct task_struct *task = get_proc_task(inode);
368 struct mnt_namespace *ns = NULL;
369 struct proc_mounts *p;
370 int ret = -EINVAL;
372 if (task) {
373 task_lock(task);
374 if (task->nsproxy) {
375 ns = task->nsproxy->mnt_ns;
376 if (ns)
377 get_mnt_ns(ns);
379 task_unlock(task);
380 put_task_struct(task);
383 if (ns) {
384 ret = -ENOMEM;
385 p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
386 if (p) {
387 file->private_data = &p->m;
388 ret = seq_open(file, &mounts_op);
389 if (!ret) {
390 p->m.private = ns;
391 p->event = ns->event;
392 return 0;
394 kfree(p);
396 put_mnt_ns(ns);
398 return ret;
401 static int mounts_release(struct inode *inode, struct file *file)
403 struct seq_file *m = file->private_data;
404 struct mnt_namespace *ns = m->private;
405 put_mnt_ns(ns);
406 return seq_release(inode, file);
409 static unsigned mounts_poll(struct file *file, poll_table *wait)
411 struct proc_mounts *p = file->private_data;
412 struct mnt_namespace *ns = p->m.private;
413 unsigned res = 0;
415 poll_wait(file, &ns->poll, wait);
417 spin_lock(&vfsmount_lock);
418 if (p->event != ns->event) {
419 p->event = ns->event;
420 res = POLLERR;
422 spin_unlock(&vfsmount_lock);
424 return res;
427 static struct file_operations proc_mounts_operations = {
428 .open = mounts_open,
429 .read = seq_read,
430 .llseek = seq_lseek,
431 .release = mounts_release,
432 .poll = mounts_poll,
435 extern struct seq_operations mountstats_op;
436 static int mountstats_open(struct inode *inode, struct file *file)
438 int ret = seq_open(file, &mountstats_op);
440 if (!ret) {
441 struct seq_file *m = file->private_data;
442 struct mnt_namespace *mnt_ns = NULL;
443 struct task_struct *task = get_proc_task(inode);
445 if (task) {
446 task_lock(task);
447 if (task->nsproxy)
448 mnt_ns = task->nsproxy->mnt_ns;
449 if (mnt_ns)
450 get_mnt_ns(mnt_ns);
451 task_unlock(task);
452 put_task_struct(task);
455 if (mnt_ns)
456 m->private = mnt_ns;
457 else {
458 seq_release(inode, file);
459 ret = -EINVAL;
462 return ret;
465 static struct file_operations proc_mountstats_operations = {
466 .open = mountstats_open,
467 .read = seq_read,
468 .llseek = seq_lseek,
469 .release = mounts_release,
472 #define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */
474 static ssize_t proc_info_read(struct file * file, char __user * buf,
475 size_t count, loff_t *ppos)
477 struct inode * inode = file->f_path.dentry->d_inode;
478 unsigned long page;
479 ssize_t length;
480 struct task_struct *task = get_proc_task(inode);
482 length = -ESRCH;
483 if (!task)
484 goto out_no_task;
486 if (count > PROC_BLOCK_SIZE)
487 count = PROC_BLOCK_SIZE;
489 length = -ENOMEM;
490 if (!(page = __get_free_page(GFP_KERNEL)))
491 goto out;
493 length = PROC_I(inode)->op.proc_read(task, (char*)page);
495 if (length >= 0)
496 length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
497 free_page(page);
498 out:
499 put_task_struct(task);
500 out_no_task:
501 return length;
504 static struct file_operations proc_info_file_operations = {
505 .read = proc_info_read,
508 static int mem_open(struct inode* inode, struct file* file)
510 file->private_data = (void*)((long)current->self_exec_id);
511 return 0;
514 static ssize_t mem_read(struct file * file, char __user * buf,
515 size_t count, loff_t *ppos)
517 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
518 char *page;
519 unsigned long src = *ppos;
520 int ret = -ESRCH;
521 struct mm_struct *mm;
523 if (!task)
524 goto out_no_task;
526 if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
527 goto out;
529 ret = -ENOMEM;
530 page = (char *)__get_free_page(GFP_USER);
531 if (!page)
532 goto out;
534 ret = 0;
536 mm = get_task_mm(task);
537 if (!mm)
538 goto out_free;
540 ret = -EIO;
542 if (file->private_data != (void*)((long)current->self_exec_id))
543 goto out_put;
545 ret = 0;
547 while (count > 0) {
548 int this_len, retval;
550 this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
551 retval = access_process_vm(task, src, page, this_len, 0);
552 if (!retval || !MAY_PTRACE(task) || !ptrace_may_attach(task)) {
553 if (!ret)
554 ret = -EIO;
555 break;
558 if (copy_to_user(buf, page, retval)) {
559 ret = -EFAULT;
560 break;
563 ret += retval;
564 src += retval;
565 buf += retval;
566 count -= retval;
568 *ppos = src;
570 out_put:
571 mmput(mm);
572 out_free:
573 free_page((unsigned long) page);
574 out:
575 put_task_struct(task);
576 out_no_task:
577 return ret;
580 #define mem_write NULL
582 #ifndef mem_write
583 /* This is a security hazard */
584 static ssize_t mem_write(struct file * file, const char * buf,
585 size_t count, loff_t *ppos)
587 int copied;
588 char *page;
589 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
590 unsigned long dst = *ppos;
592 copied = -ESRCH;
593 if (!task)
594 goto out_no_task;
596 if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
597 goto out;
599 copied = -ENOMEM;
600 page = (char *)__get_free_page(GFP_USER);
601 if (!page)
602 goto out;
604 copied = 0;
605 while (count > 0) {
606 int this_len, retval;
608 this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
609 if (copy_from_user(page, buf, this_len)) {
610 copied = -EFAULT;
611 break;
613 retval = access_process_vm(task, dst, page, this_len, 1);
614 if (!retval) {
615 if (!copied)
616 copied = -EIO;
617 break;
619 copied += retval;
620 buf += retval;
621 dst += retval;
622 count -= retval;
624 *ppos = dst;
625 free_page((unsigned long) page);
626 out:
627 put_task_struct(task);
628 out_no_task:
629 return copied;
631 #endif
633 static loff_t mem_lseek(struct file * file, loff_t offset, int orig)
635 switch (orig) {
636 case 0:
637 file->f_pos = offset;
638 break;
639 case 1:
640 file->f_pos += offset;
641 break;
642 default:
643 return -EINVAL;
645 force_successful_syscall_return();
646 return file->f_pos;
649 static struct file_operations proc_mem_operations = {
650 .llseek = mem_lseek,
651 .read = mem_read,
652 .write = mem_write,
653 .open = mem_open,
656 static ssize_t oom_adjust_read(struct file *file, char __user *buf,
657 size_t count, loff_t *ppos)
659 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
660 char buffer[PROC_NUMBUF];
661 size_t len;
662 int oom_adjust;
663 loff_t __ppos = *ppos;
665 if (!task)
666 return -ESRCH;
667 oom_adjust = task->oomkilladj;
668 put_task_struct(task);
670 len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust);
671 if (__ppos >= len)
672 return 0;
673 if (count > len-__ppos)
674 count = len-__ppos;
675 if (copy_to_user(buf, buffer + __ppos, count))
676 return -EFAULT;
677 *ppos = __ppos + count;
678 return count;
681 static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
682 size_t count, loff_t *ppos)
684 struct task_struct *task;
685 char buffer[PROC_NUMBUF], *end;
686 int oom_adjust;
688 memset(buffer, 0, sizeof(buffer));
689 if (count > sizeof(buffer) - 1)
690 count = sizeof(buffer) - 1;
691 if (copy_from_user(buffer, buf, count))
692 return -EFAULT;
693 oom_adjust = simple_strtol(buffer, &end, 0);
694 if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) &&
695 oom_adjust != OOM_DISABLE)
696 return -EINVAL;
697 if (*end == '\n')
698 end++;
699 task = get_proc_task(file->f_path.dentry->d_inode);
700 if (!task)
701 return -ESRCH;
702 if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) {
703 put_task_struct(task);
704 return -EACCES;
706 task->oomkilladj = oom_adjust;
707 put_task_struct(task);
708 if (end - buffer == 0)
709 return -EIO;
710 return end - buffer;
713 static struct file_operations proc_oom_adjust_operations = {
714 .read = oom_adjust_read,
715 .write = oom_adjust_write,
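/*
 * Userspace usage sketch (illustrative; "pid" is assumed to be a valid
 * process id): the file takes one decimal value between OOM_ADJUST_MIN
 * and OOM_ADJUST_MAX, or OOM_DISABLE, and lowering it below the current
 * setting requires CAP_SYS_RESOURCE.
 *
 *	char path[32];
 *	snprintf(path, sizeof(path), "/proc/%d/oom_adj", pid);
 *	int fd = open(path, O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "15\n", 3);	// make this task a preferred OOM victim
 *		close(fd);
 *	}
 */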
718 #ifdef CONFIG_AUDITSYSCALL
719 #define TMPBUFLEN 21
720 static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
721 size_t count, loff_t *ppos)
723 struct inode * inode = file->f_path.dentry->d_inode;
724 struct task_struct *task = get_proc_task(inode);
725 ssize_t length;
726 char tmpbuf[TMPBUFLEN];
728 if (!task)
729 return -ESRCH;
730 length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
731 audit_get_loginuid(task->audit_context));
732 put_task_struct(task);
733 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
736 static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
737 size_t count, loff_t *ppos)
739 struct inode * inode = file->f_path.dentry->d_inode;
740 char *page, *tmp;
741 ssize_t length;
742 uid_t loginuid;
744 if (!capable(CAP_AUDIT_CONTROL))
745 return -EPERM;
747 if (current != pid_task(proc_pid(inode), PIDTYPE_PID))
748 return -EPERM;
750 if (count >= PAGE_SIZE)
751 count = PAGE_SIZE - 1;
753 if (*ppos != 0) {
754 /* No partial writes. */
755 return -EINVAL;
757 page = (char*)__get_free_page(GFP_USER);
758 if (!page)
759 return -ENOMEM;
760 length = -EFAULT;
761 if (copy_from_user(page, buf, count))
762 goto out_free_page;
764 page[count] = '\0';
765 loginuid = simple_strtoul(page, &tmp, 10);
766 if (tmp == page) {
767 length = -EINVAL;
768 goto out_free_page;
771 length = audit_set_loginuid(current, loginuid);
772 if (likely(length == 0))
773 length = count;
775 out_free_page:
776 free_page((unsigned long) page);
777 return length;
780 static struct file_operations proc_loginuid_operations = {
781 .read = proc_loginuid_read,
782 .write = proc_loginuid_write,
784 #endif
786 #ifdef CONFIG_SECCOMP
787 static ssize_t seccomp_read(struct file *file, char __user *buf,
788 size_t count, loff_t *ppos)
790 struct task_struct *tsk = get_proc_task(file->f_dentry->d_inode);
791 char __buf[20];
792 loff_t __ppos = *ppos;
793 size_t len;
795 if (!tsk)
796 return -ESRCH;
797 /* no need to print the trailing zero, so use only len */
798 len = sprintf(__buf, "%u\n", tsk->seccomp.mode);
799 put_task_struct(tsk);
800 if (__ppos >= len)
801 return 0;
802 if (count > len - __ppos)
803 count = len - __ppos;
804 if (copy_to_user(buf, __buf + __ppos, count))
805 return -EFAULT;
806 *ppos = __ppos + count;
807 return count;
810 static ssize_t seccomp_write(struct file *file, const char __user *buf,
811 size_t count, loff_t *ppos)
813 struct task_struct *tsk = get_proc_task(file->f_dentry->d_inode);
814 char __buf[20], *end;
815 unsigned int seccomp_mode;
816 ssize_t result;
818 result = -ESRCH;
819 if (!tsk)
820 goto out_no_task;
822 /* can set it only once to be even more secure */
823 result = -EPERM;
824 if (unlikely(tsk->seccomp.mode))
825 goto out;
827 result = -EFAULT;
828 memset(__buf, 0, sizeof(__buf));
829 count = min(count, sizeof(__buf) - 1);
830 if (copy_from_user(__buf, buf, count))
831 goto out;
833 seccomp_mode = simple_strtoul(__buf, &end, 0);
834 if (*end == '\n')
835 end++;
836 result = -EINVAL;
837 if (seccomp_mode && seccomp_mode <= NR_SECCOMP_MODES) {
838 tsk->seccomp.mode = seccomp_mode;
839 set_tsk_thread_flag(tsk, TIF_SECCOMP);
840 } else
841 goto out;
842 result = -EIO;
843 if (unlikely(!(end - __buf)))
844 goto out;
845 result = end - __buf;
846 out:
847 put_task_struct(tsk);
848 out_no_task:
849 return result;
852 static struct file_operations proc_seccomp_operations = {
853 .read = seccomp_read,
854 .write = seccomp_write,
856 #endif /* CONFIG_SECCOMP */
858 #ifdef CONFIG_FAULT_INJECTION
859 static ssize_t proc_fault_inject_read(struct file * file, char __user * buf,
860 size_t count, loff_t *ppos)
862 struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
863 char buffer[PROC_NUMBUF];
864 size_t len;
865 int make_it_fail;
866 loff_t __ppos = *ppos;
868 if (!task)
869 return -ESRCH;
870 make_it_fail = task->make_it_fail;
871 put_task_struct(task);
873 len = snprintf(buffer, sizeof(buffer), "%i\n", make_it_fail);
874 if (__ppos >= len)
875 return 0;
876 if (count > len-__ppos)
877 count = len-__ppos;
878 if (copy_to_user(buf, buffer + __ppos, count))
879 return -EFAULT;
880 *ppos = __ppos + count;
881 return count;
884 static ssize_t proc_fault_inject_write(struct file * file,
885 const char __user * buf, size_t count, loff_t *ppos)
887 struct task_struct *task;
888 char buffer[PROC_NUMBUF], *end;
889 int make_it_fail;
891 if (!capable(CAP_SYS_RESOURCE))
892 return -EPERM;
893 memset(buffer, 0, sizeof(buffer));
894 if (count > sizeof(buffer) - 1)
895 count = sizeof(buffer) - 1;
896 if (copy_from_user(buffer, buf, count))
897 return -EFAULT;
898 make_it_fail = simple_strtol(buffer, &end, 0);
899 if (*end == '\n')
900 end++;
901 task = get_proc_task(file->f_dentry->d_inode);
902 if (!task)
903 return -ESRCH;
904 task->make_it_fail = make_it_fail;
905 put_task_struct(task);
906 if (end - buffer == 0)
907 return -EIO;
908 return end - buffer;
911 static struct file_operations proc_fault_inject_operations = {
912 .read = proc_fault_inject_read,
913 .write = proc_fault_inject_write,
915 #endif
917 static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
919 struct inode *inode = dentry->d_inode;
920 int error = -EACCES;
922 /* We don't need a base pointer in the /proc filesystem */
923 path_release(nd);
925 /* Are we allowed to snoop on the tasks file descriptors? */
926 if (!proc_fd_access_allowed(inode))
927 goto out;
929 error = PROC_I(inode)->op.proc_get_link(inode, &nd->dentry, &nd->mnt);
930 nd->last_type = LAST_BIND;
931 out:
932 return ERR_PTR(error);
935 static int do_proc_readlink(struct dentry *dentry, struct vfsmount *mnt,
936 char __user *buffer, int buflen)
938 struct inode * inode;
939 char *tmp = (char*)__get_free_page(GFP_KERNEL), *path;
940 int len;
942 if (!tmp)
943 return -ENOMEM;
945 inode = dentry->d_inode;
946 path = d_path(dentry, mnt, tmp, PAGE_SIZE);
947 len = PTR_ERR(path);
948 if (IS_ERR(path))
949 goto out;
950 len = tmp + PAGE_SIZE - 1 - path;
952 if (len > buflen)
953 len = buflen;
954 if (copy_to_user(buffer, path, len))
955 len = -EFAULT;
956 out:
957 free_page((unsigned long)tmp);
958 return len;
961 static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen)
963 int error = -EACCES;
964 struct inode *inode = dentry->d_inode;
965 struct dentry *de;
966 struct vfsmount *mnt = NULL;
968 /* Are we allowed to snoop on the tasks file descriptors? */
969 if (!proc_fd_access_allowed(inode))
970 goto out;
972 error = PROC_I(inode)->op.proc_get_link(inode, &de, &mnt);
973 if (error)
974 goto out;
976 error = do_proc_readlink(de, mnt, buffer, buflen);
977 dput(de);
978 mntput(mnt);
979 out:
980 return error;
983 static struct inode_operations proc_pid_link_inode_operations = {
984 .readlink = proc_pid_readlink,
985 .follow_link = proc_pid_follow_link,
986 .setattr = proc_setattr,
990 /* building an inode */
992 static int task_dumpable(struct task_struct *task)
994 int dumpable = 0;
995 struct mm_struct *mm;
997 task_lock(task);
998 mm = task->mm;
999 if (mm)
1000 dumpable = mm->dumpable;
1001 task_unlock(task);
1002 if(dumpable == 1)
1003 return 1;
1004 return 0;
1008 static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task)
1010 struct inode * inode;
1011 struct proc_inode *ei;
1013 /* We need a new inode */
1015 inode = new_inode(sb);
1016 if (!inode)
1017 goto out;
1019 /* Common stuff */
1020 ei = PROC_I(inode);
1021 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1022 inode->i_op = &proc_def_inode_operations;
1025 * grab the reference to task.
1027 ei->pid = get_task_pid(task, PIDTYPE_PID);
1028 if (!ei->pid)
1029 goto out_unlock;
1031 inode->i_uid = 0;
1032 inode->i_gid = 0;
1033 if (task_dumpable(task)) {
1034 inode->i_uid = task->euid;
1035 inode->i_gid = task->egid;
1037 security_task_to_inode(task, inode);
1039 out:
1040 return inode;
1042 out_unlock:
1043 iput(inode);
1044 return NULL;
1047 static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
1049 struct inode *inode = dentry->d_inode;
1050 struct task_struct *task;
1051 generic_fillattr(inode, stat);
1053 rcu_read_lock();
1054 stat->uid = 0;
1055 stat->gid = 0;
1056 task = pid_task(proc_pid(inode), PIDTYPE_PID);
1057 if (task) {
1058 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
1059 task_dumpable(task)) {
1060 stat->uid = task->euid;
1061 stat->gid = task->egid;
1064 rcu_read_unlock();
1065 return 0;
1068 /* dentry stuff */
1071 * Exceptional case: normally we are not allowed to unhash a busy
1072 * directory. In this case, however, we can do it - no aliasing problems
1073 * due to the way we treat inodes.
1075 * Rewrite the inode's ownerships here because the owning task may have
1076 * performed a setuid(), etc.
1078 * Before the /proc/pid/status file was created the only way to read
1079 * the effective uid of a process was to stat /proc/pid. Reading
1080 * /proc/pid/status is slow enough that procps and other packages
1081 * kept stating /proc/pid. To keep the rules in /proc simple I have
1082 * made this apply to all per-process world-readable and executable
1083 * directories.
1085 static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
1087 struct inode *inode = dentry->d_inode;
1088 struct task_struct *task = get_proc_task(inode);
1089 if (task) {
1090 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
1091 task_dumpable(task)) {
1092 inode->i_uid = task->euid;
1093 inode->i_gid = task->egid;
1094 } else {
1095 inode->i_uid = 0;
1096 inode->i_gid = 0;
1098 inode->i_mode &= ~(S_ISUID | S_ISGID);
1099 security_task_to_inode(task, inode);
1100 put_task_struct(task);
1101 return 1;
1103 d_drop(dentry);
1104 return 0;
1107 static int pid_delete_dentry(struct dentry * dentry)
1109 /* Is the task we represent dead?
1110 * If so, then don't put the dentry on the lru list,
1111 * kill it immediately.
1113 return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
1116 static struct dentry_operations pid_dentry_operations =
1118 .d_revalidate = pid_revalidate,
1119 .d_delete = pid_delete_dentry,
1122 /* Lookups */
1124 typedef struct dentry *instantiate_t(struct inode *, struct dentry *, struct task_struct *, void *);
1127 * Fill a directory entry.
1129 * If possible create the dcache entry and derive our inode number and
1130 * file type from dcache entry.
1132 * Since all of the proc inode numbers are dynamically generated, the inode
1133 * numbers do not exist until the inode is cached. This means creating
1134 * the dcache entry in readdir is necessary to keep the inode numbers
1135 * reported by readdir in sync with the inode numbers reported
1136 * by stat.
1138 static int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
1139 char *name, int len,
1140 instantiate_t instantiate, struct task_struct *task, void *ptr)
1142 struct dentry *child, *dir = filp->f_path.dentry;
1143 struct inode *inode;
1144 struct qstr qname;
1145 ino_t ino = 0;
1146 unsigned type = DT_UNKNOWN;
1148 qname.name = name;
1149 qname.len = len;
1150 qname.hash = full_name_hash(name, len);
1152 child = d_lookup(dir, &qname);
1153 if (!child) {
1154 struct dentry *new;
1155 new = d_alloc(dir, &qname);
1156 if (new) {
1157 child = instantiate(dir->d_inode, new, task, ptr);
1158 if (child)
1159 dput(new);
1160 else
1161 child = new;
1164 if (!child || IS_ERR(child) || !child->d_inode)
1165 goto end_instantiate;
1166 inode = child->d_inode;
1167 if (inode) {
1168 ino = inode->i_ino;
1169 type = inode->i_mode >> 12;
1171 dput(child);
1172 end_instantiate:
1173 if (!ino)
1174 ino = find_inode_number(dir, &qname);
1175 if (!ino)
1176 ino = 1;
1177 return filldir(dirent, name, len, filp->f_pos, ino, type);
1180 static unsigned name_to_int(struct dentry *dentry)
1182 const char *name = dentry->d_name.name;
1183 int len = dentry->d_name.len;
1184 unsigned n = 0;
1186 if (len > 1 && *name == '0')
1187 goto out;
1188 while (len-- > 0) {
1189 unsigned c = *name++ - '0';
1190 if (c > 9)
1191 goto out;
1192 if (n >= (~0U-9)/10)
1193 goto out;
1194 n *= 10;
1195 n += c;
1197 return n;
1198 out:
1199 return ~0U;
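/*
 * Usage sketch, mirroring proc_lookupfd() and proc_pid_lookup() below:
 * callers treat the ~0U return value as "not a purely numeric name".
 *
 *	unsigned fd = name_to_int(dentry);
 *	if (fd == ~0U)
 *		goto out;		// not a numeric /proc entry
 */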
1202 static int proc_fd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
1204 struct task_struct *task = get_proc_task(inode);
1205 struct files_struct *files = NULL;
1206 struct file *file;
1207 int fd = proc_fd(inode);
1209 if (task) {
1210 files = get_files_struct(task);
1211 put_task_struct(task);
1213 if (files) {
1215 * We are not taking a ref to the file structure, so we must
1216 * hold ->file_lock.
1218 spin_lock(&files->file_lock);
1219 file = fcheck_files(files, fd);
1220 if (file) {
1221 *mnt = mntget(file->f_path.mnt);
1222 *dentry = dget(file->f_path.dentry);
1223 spin_unlock(&files->file_lock);
1224 put_files_struct(files);
1225 return 0;
1227 spin_unlock(&files->file_lock);
1228 put_files_struct(files);
1230 return -ENOENT;
1233 static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
1235 struct inode *inode = dentry->d_inode;
1236 struct task_struct *task = get_proc_task(inode);
1237 int fd = proc_fd(inode);
1238 struct files_struct *files;
1240 if (task) {
1241 files = get_files_struct(task);
1242 if (files) {
1243 rcu_read_lock();
1244 if (fcheck_files(files, fd)) {
1245 rcu_read_unlock();
1246 put_files_struct(files);
1247 if (task_dumpable(task)) {
1248 inode->i_uid = task->euid;
1249 inode->i_gid = task->egid;
1250 } else {
1251 inode->i_uid = 0;
1252 inode->i_gid = 0;
1254 inode->i_mode &= ~(S_ISUID | S_ISGID);
1255 security_task_to_inode(task, inode);
1256 put_task_struct(task);
1257 return 1;
1259 rcu_read_unlock();
1260 put_files_struct(files);
1262 put_task_struct(task);
1264 d_drop(dentry);
1265 return 0;
1268 static struct dentry_operations tid_fd_dentry_operations =
1270 .d_revalidate = tid_fd_revalidate,
1271 .d_delete = pid_delete_dentry,
1274 static struct dentry *proc_fd_instantiate(struct inode *dir,
1275 struct dentry *dentry, struct task_struct *task, void *ptr)
1277 unsigned fd = *(unsigned *)ptr;
1278 struct file *file;
1279 struct files_struct *files;
1280 struct inode *inode;
1281 struct proc_inode *ei;
1282 struct dentry *error = ERR_PTR(-ENOENT);
1284 inode = proc_pid_make_inode(dir->i_sb, task);
1285 if (!inode)
1286 goto out;
1287 ei = PROC_I(inode);
1288 ei->fd = fd;
1289 files = get_files_struct(task);
1290 if (!files)
1291 goto out_iput;
1292 inode->i_mode = S_IFLNK;
1295 * We are not taking a ref to the file structure, so we must
1296 * hold ->file_lock.
1298 spin_lock(&files->file_lock);
1299 file = fcheck_files(files, fd);
1300 if (!file)
1301 goto out_unlock;
1302 if (file->f_mode & 1)	/* FMODE_READ */
1303 inode->i_mode |= S_IRUSR | S_IXUSR;
1304 if (file->f_mode & 2)	/* FMODE_WRITE */
1305 inode->i_mode |= S_IWUSR | S_IXUSR;
1306 spin_unlock(&files->file_lock);
1307 put_files_struct(files);
1309 inode->i_op = &proc_pid_link_inode_operations;
1310 inode->i_size = 64;
1311 ei->op.proc_get_link = proc_fd_link;
1312 dentry->d_op = &tid_fd_dentry_operations;
1313 d_add(dentry, inode);
1314 /* Close the race of the process dying before we return the dentry */
1315 if (tid_fd_revalidate(dentry, NULL))
1316 error = NULL;
1318 out:
1319 return error;
1320 out_unlock:
1321 spin_unlock(&files->file_lock);
1322 put_files_struct(files);
1323 out_iput:
1324 iput(inode);
1325 goto out;
1328 static struct dentry *proc_lookupfd(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
1330 struct task_struct *task = get_proc_task(dir);
1331 unsigned fd = name_to_int(dentry);
1332 struct dentry *result = ERR_PTR(-ENOENT);
1334 if (!task)
1335 goto out_no_task;
1336 if (fd == ~0U)
1337 goto out;
1339 result = proc_fd_instantiate(dir, dentry, task, &fd);
1340 out:
1341 put_task_struct(task);
1342 out_no_task:
1343 return result;
1346 static int proc_fd_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
1347 struct task_struct *task, int fd)
1349 char name[PROC_NUMBUF];
1350 int len = snprintf(name, sizeof(name), "%d", fd);
1351 return proc_fill_cache(filp, dirent, filldir, name, len,
1352 proc_fd_instantiate, task, &fd);
1355 static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir)
1357 struct dentry *dentry = filp->f_path.dentry;
1358 struct inode *inode = dentry->d_inode;
1359 struct task_struct *p = get_proc_task(inode);
1360 unsigned int fd, tid, ino;
1361 int retval;
1362 struct files_struct * files;
1363 struct fdtable *fdt;
1365 retval = -ENOENT;
1366 if (!p)
1367 goto out_no_task;
1368 retval = 0;
1369 tid = p->pid;
1371 fd = filp->f_pos;
1372 switch (fd) {
1373 case 0:
1374 if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
1375 goto out;
1376 filp->f_pos++;
1377 case 1:
1378 ino = parent_ino(dentry);
1379 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
1380 goto out;
1381 filp->f_pos++;
1382 default:
1383 files = get_files_struct(p);
1384 if (!files)
1385 goto out;
1386 rcu_read_lock();
1387 fdt = files_fdtable(files);
1388 for (fd = filp->f_pos-2;
1389 fd < fdt->max_fds;
1390 fd++, filp->f_pos++) {
1392 if (!fcheck_files(files, fd))
1393 continue;
1394 rcu_read_unlock();
1396 if (proc_fd_fill_cache(filp, dirent, filldir, p, fd) < 0) {
1397 rcu_read_lock();
1398 break;
1400 rcu_read_lock();
1402 rcu_read_unlock();
1403 put_files_struct(files);
1405 out:
1406 put_task_struct(p);
1407 out_no_task:
1408 return retval;
1411 static struct file_operations proc_fd_operations = {
1412 .read = generic_read_dir,
1413 .readdir = proc_readfd,
1417 * proc directories can do almost nothing..
1419 static struct inode_operations proc_fd_inode_operations = {
1420 .lookup = proc_lookupfd,
1421 .setattr = proc_setattr,
1424 static struct dentry *proc_pident_instantiate(struct inode *dir,
1425 struct dentry *dentry, struct task_struct *task, void *ptr)
1427 struct pid_entry *p = ptr;
1428 struct inode *inode;
1429 struct proc_inode *ei;
1430 struct dentry *error = ERR_PTR(-EINVAL);
1432 inode = proc_pid_make_inode(dir->i_sb, task);
1433 if (!inode)
1434 goto out;
1436 ei = PROC_I(inode);
1437 inode->i_mode = p->mode;
1438 if (S_ISDIR(inode->i_mode))
1439 inode->i_nlink = 2; /* Use getattr to fix if necessary */
1440 if (p->iop)
1441 inode->i_op = p->iop;
1442 if (p->fop)
1443 inode->i_fop = p->fop;
1444 ei->op = p->op;
1445 dentry->d_op = &pid_dentry_operations;
1446 d_add(dentry, inode);
1447 /* Close the race of the process dying before we return the dentry */
1448 if (pid_revalidate(dentry, NULL))
1449 error = NULL;
1450 out:
1451 return error;
1454 static struct dentry *proc_pident_lookup(struct inode *dir,
1455 struct dentry *dentry,
1456 struct pid_entry *ents,
1457 unsigned int nents)
1459 struct inode *inode;
1460 struct dentry *error;
1461 struct task_struct *task = get_proc_task(dir);
1462 struct pid_entry *p, *last;
1464 error = ERR_PTR(-ENOENT);
1465 inode = NULL;
1467 if (!task)
1468 goto out_no_task;
1471 * Yes, it does not scale. And it should not. Don't add
1472 * new entries into /proc/<tgid>/ without very good reasons.
1474 last = &ents[nents - 1];
1475 for (p = ents; p <= last; p++) {
1476 if (p->len != dentry->d_name.len)
1477 continue;
1478 if (!memcmp(dentry->d_name.name, p->name, p->len))
1479 break;
1481 if (p > last)
1482 goto out;
1484 error = proc_pident_instantiate(dir, dentry, task, p);
1485 out:
1486 put_task_struct(task);
1487 out_no_task:
1488 return error;
1491 static int proc_pident_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
1492 struct task_struct *task, struct pid_entry *p)
1494 return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
1495 proc_pident_instantiate, task, p);
1498 static int proc_pident_readdir(struct file *filp,
1499 void *dirent, filldir_t filldir,
1500 struct pid_entry *ents, unsigned int nents)
1502 int i;
1503 int pid;
1504 struct dentry *dentry = filp->f_path.dentry;
1505 struct inode *inode = dentry->d_inode;
1506 struct task_struct *task = get_proc_task(inode);
1507 struct pid_entry *p, *last;
1508 ino_t ino;
1509 int ret;
1511 ret = -ENOENT;
1512 if (!task)
1513 goto out_no_task;
1515 ret = 0;
1516 pid = task->pid;
1517 i = filp->f_pos;
1518 switch (i) {
1519 case 0:
1520 ino = inode->i_ino;
1521 if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
1522 goto out;
1523 i++;
1524 filp->f_pos++;
1525 /* fall through */
1526 case 1:
1527 ino = parent_ino(dentry);
1528 if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
1529 goto out;
1530 i++;
1531 filp->f_pos++;
1532 /* fall through */
1533 default:
1534 i -= 2;
1535 if (i >= nents) {
1536 ret = 1;
1537 goto out;
1539 p = ents + i;
1540 last = &ents[nents - 1];
1541 while (p <= last) {
1542 if (proc_pident_fill_cache(filp, dirent, filldir, task, p) < 0)
1543 goto out;
1544 filp->f_pos++;
1545 p++;
1549 ret = 1;
1550 out:
1551 put_task_struct(task);
1552 out_no_task:
1553 return ret;
1556 #ifdef CONFIG_SECURITY
1557 static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
1558 size_t count, loff_t *ppos)
1560 struct inode * inode = file->f_path.dentry->d_inode;
1561 unsigned long page;
1562 ssize_t length;
1563 struct task_struct *task = get_proc_task(inode);
1565 length = -ESRCH;
1566 if (!task)
1567 goto out_no_task;
1569 if (count > PAGE_SIZE)
1570 count = PAGE_SIZE;
1571 length = -ENOMEM;
1572 if (!(page = __get_free_page(GFP_KERNEL)))
1573 goto out;
1575 length = security_getprocattr(task,
1576 (char*)file->f_path.dentry->d_name.name,
1577 (void*)page, count);
1578 if (length >= 0)
1579 length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
1580 free_page(page);
1581 out:
1582 put_task_struct(task);
1583 out_no_task:
1584 return length;
1587 static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
1588 size_t count, loff_t *ppos)
1590 struct inode * inode = file->f_path.dentry->d_inode;
1591 char *page;
1592 ssize_t length;
1593 struct task_struct *task = get_proc_task(inode);
1595 length = -ESRCH;
1596 if (!task)
1597 goto out_no_task;
1598 if (count > PAGE_SIZE)
1599 count = PAGE_SIZE;
1601 /* No partial writes. */
1602 length = -EINVAL;
1603 if (*ppos != 0)
1604 goto out;
1606 length = -ENOMEM;
1607 page = (char*)__get_free_page(GFP_USER);
1608 if (!page)
1609 goto out;
1611 length = -EFAULT;
1612 if (copy_from_user(page, buf, count))
1613 goto out_free;
1615 length = security_setprocattr(task,
1616 (char*)file->f_path.dentry->d_name.name,
1617 (void*)page, count);
1618 out_free:
1619 free_page((unsigned long) page);
1620 out:
1621 put_task_struct(task);
1622 out_no_task:
1623 return length;
1626 static struct file_operations proc_pid_attr_operations = {
1627 .read = proc_pid_attr_read,
1628 .write = proc_pid_attr_write,
1631 static struct pid_entry attr_dir_stuff[] = {
1632 REG("current", S_IRUGO|S_IWUGO, pid_attr),
1633 REG("prev", S_IRUGO, pid_attr),
1634 REG("exec", S_IRUGO|S_IWUGO, pid_attr),
1635 REG("fscreate", S_IRUGO|S_IWUGO, pid_attr),
1636 REG("keycreate", S_IRUGO|S_IWUGO, pid_attr),
1637 REG("sockcreate", S_IRUGO|S_IWUGO, pid_attr),
1640 static int proc_attr_dir_readdir(struct file * filp,
1641 void * dirent, filldir_t filldir)
1643 return proc_pident_readdir(filp,dirent,filldir,
1644 attr_dir_stuff,ARRAY_SIZE(attr_dir_stuff));
1647 static struct file_operations proc_attr_dir_operations = {
1648 .read = generic_read_dir,
1649 .readdir = proc_attr_dir_readdir,
1652 static struct dentry *proc_attr_dir_lookup(struct inode *dir,
1653 struct dentry *dentry, struct nameidata *nd)
1655 return proc_pident_lookup(dir, dentry,
1656 attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
1659 static struct inode_operations proc_attr_dir_inode_operations = {
1660 .lookup = proc_attr_dir_lookup,
1661 .getattr = pid_getattr,
1662 .setattr = proc_setattr,
1665 #endif
1668 * /proc/self:
1670 static int proc_self_readlink(struct dentry *dentry, char __user *buffer,
1671 int buflen)
1673 char tmp[PROC_NUMBUF];
1674 sprintf(tmp, "%d", current->tgid);
1675 return vfs_readlink(dentry,buffer,buflen,tmp);
1678 static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
1680 char tmp[PROC_NUMBUF];
1681 sprintf(tmp, "%d", current->tgid);
1682 return ERR_PTR(vfs_follow_link(nd,tmp));
1685 static struct inode_operations proc_self_inode_operations = {
1686 .readlink = proc_self_readlink,
1687 .follow_link = proc_self_follow_link,
1691 * proc base
1693 * These are the directory entries in the root directory of /proc
1694 * that properly belong to the /proc filesystem, as they describe
1695 * something that is process related.
1697 static struct pid_entry proc_base_stuff[] = {
1698 NOD("self", S_IFLNK|S_IRWXUGO,
1699 &proc_self_inode_operations, NULL, {}),
1703 * Exceptional case: normally we are not allowed to unhash a busy
1704 * directory. In this case, however, we can do it - no aliasing problems
1705 * due to the way we treat inodes.
1707 static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
1709 struct inode *inode = dentry->d_inode;
1710 struct task_struct *task = get_proc_task(inode);
1711 if (task) {
1712 put_task_struct(task);
1713 return 1;
1715 d_drop(dentry);
1716 return 0;
1719 static struct dentry_operations proc_base_dentry_operations =
1721 .d_revalidate = proc_base_revalidate,
1722 .d_delete = pid_delete_dentry,
1725 static struct dentry *proc_base_instantiate(struct inode *dir,
1726 struct dentry *dentry, struct task_struct *task, void *ptr)
1728 struct pid_entry *p = ptr;
1729 struct inode *inode;
1730 struct proc_inode *ei;
1731 struct dentry *error = ERR_PTR(-EINVAL);
1733 /* Allocate the inode */
1734 error = ERR_PTR(-ENOMEM);
1735 inode = new_inode(dir->i_sb);
1736 if (!inode)
1737 goto out;
1739 /* Initialize the inode */
1740 ei = PROC_I(inode);
1741 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1744 * grab the reference to the task.
1746 ei->pid = get_task_pid(task, PIDTYPE_PID);
1747 if (!ei->pid)
1748 goto out_iput;
1750 inode->i_uid = 0;
1751 inode->i_gid = 0;
1752 inode->i_mode = p->mode;
1753 if (S_ISDIR(inode->i_mode))
1754 inode->i_nlink = 2;
1755 if (S_ISLNK(inode->i_mode))
1756 inode->i_size = 64;
1757 if (p->iop)
1758 inode->i_op = p->iop;
1759 if (p->fop)
1760 inode->i_fop = p->fop;
1761 ei->op = p->op;
1762 dentry->d_op = &proc_base_dentry_operations;
1763 d_add(dentry, inode);
1764 error = NULL;
1765 out:
1766 return error;
1767 out_iput:
1768 iput(inode);
1769 goto out;
1772 static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry)
1774 struct dentry *error;
1775 struct task_struct *task = get_proc_task(dir);
1776 struct pid_entry *p, *last;
1778 error = ERR_PTR(-ENOENT);
1780 if (!task)
1781 goto out_no_task;
1783 /* Lookup the directory entry */
1784 last = &proc_base_stuff[ARRAY_SIZE(proc_base_stuff) - 1];
1785 for (p = proc_base_stuff; p <= last; p++) {
1786 if (p->len != dentry->d_name.len)
1787 continue;
1788 if (!memcmp(dentry->d_name.name, p->name, p->len))
1789 break;
1791 if (p > last)
1792 goto out;
1794 error = proc_base_instantiate(dir, dentry, task, p);
1796 out:
1797 put_task_struct(task);
1798 out_no_task:
1799 return error;
1802 static int proc_base_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
1803 struct task_struct *task, struct pid_entry *p)
1805 return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
1806 proc_base_instantiate, task, p);
1809 #ifdef CONFIG_TASK_IO_ACCOUNTING
1810 static int proc_pid_io_accounting(struct task_struct *task, char *buffer)
1812 return sprintf(buffer,
1813 "rchar: %llu\n"
1814 "wchar: %llu\n"
1815 "syscr: %llu\n"
1816 "syscw: %llu\n"
1817 "read_bytes: %llu\n"
1818 "write_bytes: %llu\n"
1819 "cancelled_write_bytes: %llu\n",
1820 (unsigned long long)task->rchar,
1821 (unsigned long long)task->wchar,
1822 (unsigned long long)task->syscr,
1823 (unsigned long long)task->syscw,
1824 (unsigned long long)task->ioac.read_bytes,
1825 (unsigned long long)task->ioac.write_bytes,
1826 (unsigned long long)task->ioac.cancelled_write_bytes);
1828 #endif
1831 * Thread groups
1833 static struct file_operations proc_task_operations;
1834 static struct inode_operations proc_task_inode_operations;
1836 static struct pid_entry tgid_base_stuff[] = {
1837 DIR("task", S_IRUGO|S_IXUGO, task),
1838 DIR("fd", S_IRUSR|S_IXUSR, fd),
1839 INF("environ", S_IRUSR, pid_environ),
1840 INF("auxv", S_IRUSR, pid_auxv),
1841 INF("status", S_IRUGO, pid_status),
1842 INF("cmdline", S_IRUGO, pid_cmdline),
1843 INF("stat", S_IRUGO, tgid_stat),
1844 INF("statm", S_IRUGO, pid_statm),
1845 REG("maps", S_IRUGO, maps),
1846 #ifdef CONFIG_NUMA
1847 REG("numa_maps", S_IRUGO, numa_maps),
1848 #endif
1849 REG("mem", S_IRUSR|S_IWUSR, mem),
1850 #ifdef CONFIG_SECCOMP
1851 REG("seccomp", S_IRUSR|S_IWUSR, seccomp),
1852 #endif
1853 LNK("cwd", cwd),
1854 LNK("root", root),
1855 LNK("exe", exe),
1856 REG("mounts", S_IRUGO, mounts),
1857 REG("mountstats", S_IRUSR, mountstats),
1858 #ifdef CONFIG_MMU
1859 REG("smaps", S_IRUGO, smaps),
1860 #endif
1861 #ifdef CONFIG_SECURITY
1862 DIR("attr", S_IRUGO|S_IXUGO, attr_dir),
1863 #endif
1864 #ifdef CONFIG_KALLSYMS
1865 INF("wchan", S_IRUGO, pid_wchan),
1866 #endif
1867 #ifdef CONFIG_SCHEDSTATS
1868 INF("schedstat", S_IRUGO, pid_schedstat),
1869 #endif
1870 #ifdef CONFIG_CPUSETS
1871 REG("cpuset", S_IRUGO, cpuset),
1872 #endif
1873 INF("oom_score", S_IRUGO, oom_score),
1874 REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
1875 #ifdef CONFIG_AUDITSYSCALL
1876 REG("loginuid", S_IWUSR|S_IRUGO, loginuid),
1877 #endif
1878 #ifdef CONFIG_FAULT_INJECTION
1879 REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
1880 #endif
1881 #ifdef CONFIG_TASK_IO_ACCOUNTING
1882 INF("io", S_IRUGO, pid_io_accounting),
1883 #endif
1886 static int proc_tgid_base_readdir(struct file * filp,
1887 void * dirent, filldir_t filldir)
1889 return proc_pident_readdir(filp,dirent,filldir,
1890 tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff));
1893 static struct file_operations proc_tgid_base_operations = {
1894 .read = generic_read_dir,
1895 .readdir = proc_tgid_base_readdir,
1898 static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
1899 return proc_pident_lookup(dir, dentry,
1900 tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
1903 static struct inode_operations proc_tgid_base_inode_operations = {
1904 .lookup = proc_tgid_base_lookup,
1905 .getattr = pid_getattr,
1906 .setattr = proc_setattr,
1910 * proc_flush_task - Remove dcache entries for @task from the /proc dcache.
1912 * @task: task that should be flushed.
1914 * Looks in the dcache for
1915 * /proc/@pid
1916 * /proc/@tgid/task/@pid
1917 * if either directory is present, flushes it and all of its children
1918 * from the dcache.
1920 * It is safe and reasonable to cache /proc entries for a task until
1921 * that task exits. After that they just clog up the dcache with
1922 * useless entries, possibly causing useful dcache entries to be
1923 * flushed instead. This routine is provided to flush those useless
1924 * dcache entries at process exit time.
1926 * NOTE: This routine is just an optimization, so it does not guarantee
1927 * that no dcache entries will exist at process exit time; it
1928 * just makes it very unlikely that any will persist.
1930 void proc_flush_task(struct task_struct *task)
1932 struct dentry *dentry, *leader, *dir;
1933 char buf[PROC_NUMBUF];
1934 struct qstr name;
1936 name.name = buf;
1937 name.len = snprintf(buf, sizeof(buf), "%d", task->pid);
1938 dentry = d_hash_and_lookup(proc_mnt->mnt_root, &name);
1939 if (dentry) {
1940 shrink_dcache_parent(dentry);
1941 d_drop(dentry);
1942 dput(dentry);
1945 if (thread_group_leader(task))
1946 goto out;
1948 name.name = buf;
1949 name.len = snprintf(buf, sizeof(buf), "%d", task->tgid);
1950 leader = d_hash_and_lookup(proc_mnt->mnt_root, &name);
1951 if (!leader)
1952 goto out;
1954 name.name = "task";
1955 name.len = strlen(name.name);
1956 dir = d_hash_and_lookup(leader, &name);
1957 if (!dir)
1958 goto out_put_leader;
1960 name.name = buf;
1961 name.len = snprintf(buf, sizeof(buf), "%d", task->pid);
1962 dentry = d_hash_and_lookup(dir, &name);
1963 if (dentry) {
1964 shrink_dcache_parent(dentry);
1965 d_drop(dentry);
1966 dput(dentry);
1969 dput(dir);
1970 out_put_leader:
1971 dput(leader);
1972 out:
1973 return;
1976 static struct dentry *proc_pid_instantiate(struct inode *dir,
1977 struct dentry * dentry,
1978 struct task_struct *task, void *ptr)
1980 struct dentry *error = ERR_PTR(-ENOENT);
1981 struct inode *inode;
1983 inode = proc_pid_make_inode(dir->i_sb, task);
1984 if (!inode)
1985 goto out;
1987 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
1988 inode->i_op = &proc_tgid_base_inode_operations;
1989 inode->i_fop = &proc_tgid_base_operations;
1990 inode->i_flags|=S_IMMUTABLE;
1991 inode->i_nlink = 4;
1992 #ifdef CONFIG_SECURITY
1993 inode->i_nlink += 1;
1994 #endif
1996 dentry->d_op = &pid_dentry_operations;
1998 d_add(dentry, inode);
1999 /* Close the race of the process dying before we return the dentry */
2000 if (pid_revalidate(dentry, NULL))
2001 error = NULL;
2002 out:
2003 return error;
2006 struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
2008 struct dentry *result = ERR_PTR(-ENOENT);
2009 struct task_struct *task;
2010 unsigned tgid;
2012 result = proc_base_lookup(dir, dentry);
2013 if (!IS_ERR(result) || PTR_ERR(result) != -ENOENT)
2014 goto out;
2016 tgid = name_to_int(dentry);
2017 if (tgid == ~0U)
2018 goto out;
2020 rcu_read_lock();
2021 task = find_task_by_pid(tgid);
2022 if (task)
2023 get_task_struct(task);
2024 rcu_read_unlock();
2025 if (!task)
2026 goto out;
2028 result = proc_pid_instantiate(dir, dentry, task, NULL);
2029 put_task_struct(task);
2030 out:
2031 return result;
2035 * Find the first task with tgid >= tgid
2038 static struct task_struct *next_tgid(unsigned int tgid)
2040 struct task_struct *task;
2041 struct pid *pid;
2043 rcu_read_lock();
2044 retry:
2045 task = NULL;
2046 pid = find_ge_pid(tgid);
2047 if (pid) {
2048 tgid = pid->nr + 1;
2049 task = pid_task(pid, PIDTYPE_PID);
2050 /* What we want to know is whether the pid we have found is the
2051 * pid of a thread_group_leader. Testing whether the task
2052 * is a thread_group_leader is the obvious thing to do,
2053 * but there is a window when it fails, due to
2054 * the pid transfer logic in de_thread.
2056 * So we perform the straightforward test of seeing
2057 * whether the pid we have found is the pid of a thread
2058 * group leader, and don't worry if the task we have
2059 * found doesn't happen to be a thread group leader,
2060 * as we don't care in the case of readdir.
2062 if (!task || !has_group_leader_pid(task))
2063 goto retry;
2064 get_task_struct(task);
2066 rcu_read_unlock();
2067 return task;
2070 #define TGID_OFFSET (FIRST_PROCESS_ENTRY + ARRAY_SIZE(proc_base_stuff))
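/*
 * A worked example of the readdir position layout used below: offsets
 * under FIRST_PROCESS_ENTRY belong to the non-process entries of /proc,
 * the next ARRAY_SIZE(proc_base_stuff) slots cover entries such as
 * "self", and a process with tgid N is then reported at
 *
 *	filp->f_pos = N + TGID_OFFSET;
 *
 * which is why proc_pid_readdir() recovers its starting point with
 * "tgid = filp->f_pos - TGID_OFFSET".
 */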
2072 static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
2073 struct task_struct *task, int tgid)
2075 char name[PROC_NUMBUF];
2076 int len = snprintf(name, sizeof(name), "%d", tgid);
2077 return proc_fill_cache(filp, dirent, filldir, name, len,
2078 proc_pid_instantiate, task, NULL);
2081 /* for the /proc/ directory itself, after non-process stuff has been done */
2082 int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
2084 unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
2085 struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
2086 struct task_struct *task;
2087 int tgid;
2089 if (!reaper)
2090 goto out_no_task;
2092 for (; nr < ARRAY_SIZE(proc_base_stuff); filp->f_pos++, nr++) {
2093 struct pid_entry *p = &proc_base_stuff[nr];
2094 if (proc_base_fill_cache(filp, dirent, filldir, reaper, p) < 0)
2095 goto out;
2098 tgid = filp->f_pos - TGID_OFFSET;
2099 for (task = next_tgid(tgid);
2100 task;
2101 put_task_struct(task), task = next_tgid(tgid + 1)) {
2102 tgid = task->pid;
2103 filp->f_pos = tgid + TGID_OFFSET;
2104 if (proc_pid_fill_cache(filp, dirent, filldir, task, tgid) < 0) {
2105 put_task_struct(task);
2106 goto out;
2109 filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET;
2110 out:
2111 put_task_struct(reaper);
2112 out_no_task:
2113 return 0;
2117 * Tasks
2119 static struct pid_entry tid_base_stuff[] = {
2120 DIR("fd", S_IRUSR|S_IXUSR, fd),
2121 INF("environ", S_IRUSR, pid_environ),
2122 INF("auxv", S_IRUSR, pid_auxv),
2123 INF("status", S_IRUGO, pid_status),
2124 INF("cmdline", S_IRUGO, pid_cmdline),
2125 INF("stat", S_IRUGO, tid_stat),
2126 INF("statm", S_IRUGO, pid_statm),
2127 REG("maps", S_IRUGO, maps),
2128 #ifdef CONFIG_NUMA
2129 REG("numa_maps", S_IRUGO, numa_maps),
2130 #endif
2131 REG("mem", S_IRUSR|S_IWUSR, mem),
2132 #ifdef CONFIG_SECCOMP
2133 REG("seccomp", S_IRUSR|S_IWUSR, seccomp),
2134 #endif
2135 LNK("cwd", cwd),
2136 LNK("root", root),
2137 LNK("exe", exe),
2138 REG("mounts", S_IRUGO, mounts),
2139 #ifdef CONFIG_MMU
2140 REG("smaps", S_IRUGO, smaps),
2141 #endif
2142 #ifdef CONFIG_SECURITY
2143 DIR("attr", S_IRUGO|S_IXUGO, attr_dir),
2144 #endif
2145 #ifdef CONFIG_KALLSYMS
2146 INF("wchan", S_IRUGO, pid_wchan),
2147 #endif
2148 #ifdef CONFIG_SCHEDSTATS
2149 INF("schedstat", S_IRUGO, pid_schedstat),
2150 #endif
2151 #ifdef CONFIG_CPUSETS
2152 REG("cpuset", S_IRUGO, cpuset),
2153 #endif
2154 INF("oom_score", S_IRUGO, oom_score),
2155 REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
2156 #ifdef CONFIG_AUDITSYSCALL
2157 REG("loginuid", S_IWUSR|S_IRUGO, loginuid),
2158 #endif
2159 #ifdef CONFIG_FAULT_INJECTION
2160 REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
2161 #endif
2164 static int proc_tid_base_readdir(struct file * filp,
2165 void * dirent, filldir_t filldir)
2167 return proc_pident_readdir(filp,dirent,filldir,
2168 tid_base_stuff,ARRAY_SIZE(tid_base_stuff));
2171 static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
2172 return proc_pident_lookup(dir, dentry,
2173 tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
2176 static struct file_operations proc_tid_base_operations = {
2177 .read = generic_read_dir,
2178 .readdir = proc_tid_base_readdir,
2181 static struct inode_operations proc_tid_base_inode_operations = {
2182 .lookup = proc_tid_base_lookup,
2183 .getattr = pid_getattr,
2184 .setattr = proc_setattr,
2187 static struct dentry *proc_task_instantiate(struct inode *dir,
2188 struct dentry *dentry, struct task_struct *task, void *ptr)
2190 struct dentry *error = ERR_PTR(-ENOENT);
2191 struct inode *inode;
2192 inode = proc_pid_make_inode(dir->i_sb, task);
2194 if (!inode)
2195 goto out;
2196 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
2197 inode->i_op = &proc_tid_base_inode_operations;
2198 inode->i_fop = &proc_tid_base_operations;
2199 inode->i_flags|=S_IMMUTABLE;
2200 inode->i_nlink = 3;
2201 #ifdef CONFIG_SECURITY
2202 inode->i_nlink += 1;
2203 #endif
2205 dentry->d_op = &pid_dentry_operations;
2207 d_add(dentry, inode);
2208 /* Close the race of the process dying before we return the dentry */
2209 if (pid_revalidate(dentry, NULL))
2210 error = NULL;
2211 out:
2212 return error;
2215 static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
2217 struct dentry *result = ERR_PTR(-ENOENT);
2218 struct task_struct *task;
2219 struct task_struct *leader = get_proc_task(dir);
2220 unsigned tid;
2222 if (!leader)
2223 goto out_no_task;
2225 tid = name_to_int(dentry);
2226 if (tid == ~0U)
2227 goto out;
2229 rcu_read_lock();
2230 task = find_task_by_pid(tid);
2231 if (task)
2232 get_task_struct(task);
2233 rcu_read_unlock();
2234 if (!task)
2235 goto out;
2236 if (leader->tgid != task->tgid)
2237 goto out_drop_task;
2239 result = proc_task_instantiate(dir, dentry, task, NULL);
2240 out_drop_task:
2241 put_task_struct(task);
2242 out:
2243 put_task_struct(leader);
2244 out_no_task:
2245 return result;
2249 * Find the first tid of a thread group to return to user space.
2251 * Usually this is just the thread group leader, but if the user's
2252 * buffer was too small or there was a seek into the middle of the
2253 * directory we have more work to do.
2255 * In the case of a short read we start with find_task_by_pid.
2257 * In the case of a seek we start with the leader and walk nr
2258 * threads past it.
2260 static struct task_struct *first_tid(struct task_struct *leader,
2261 int tid, int nr)
2263 struct task_struct *pos;
2265 rcu_read_lock();
2266 /* Attempt to start with the pid of a thread */
2267 if (tid && (nr > 0)) {
2268 pos = find_task_by_pid(tid);
2269 if (pos && (pos->group_leader == leader))
2270 goto found;
2273 /* If nr exceeds the number of threads there is nothing to do */
2274 pos = NULL;
2275 if (nr && nr >= get_nr_threads(leader))
2276 goto out;
2278 /* If we haven't found our starting place yet start
2279 * with the leader and walk nr threads forward.
2281 for (pos = leader; nr > 0; --nr) {
2282 pos = next_thread(pos);
2283 if (pos == leader) {
2284 pos = NULL;
2285 goto out;
2288 found:
2289 get_task_struct(pos);
2290 out:
2291 rcu_read_unlock();
2292 return pos;
2296 * Find the next thread in the thread list.
2297 * Return NULL if there is an error or no next thread.
2299 * The reference to the input task_struct is released.
2301 static struct task_struct *next_tid(struct task_struct *start)
2303 struct task_struct *pos = NULL;
2304 rcu_read_lock();
2305 if (pid_alive(start)) {
2306 pos = next_thread(start);
2307 if (thread_group_leader(pos))
2308 pos = NULL;
2309 else
2310 get_task_struct(pos);
2312 rcu_read_unlock();
2313 put_task_struct(start);
2314 return pos;
2317 static int proc_task_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
2318 struct task_struct *task, int tid)
2320 char name[PROC_NUMBUF];
2321 int len = snprintf(name, sizeof(name), "%d", tid);
2322 return proc_fill_cache(filp, dirent, filldir, name, len,
2323 proc_task_instantiate, task, NULL);
2326 /* for the /proc/TGID/task/ directories */
2327 static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir)
2329 struct dentry *dentry = filp->f_path.dentry;
2330 struct inode *inode = dentry->d_inode;
2331 struct task_struct *leader = NULL;
2332 struct task_struct *task;
2333 int retval = -ENOENT;
2334 ino_t ino;
2335 int tid;
2336 unsigned long pos = filp->f_pos; /* avoiding "long long" filp->f_pos */
2338 task = get_proc_task(inode);
2339 if (!task)
2340 goto out_no_task;
2341 rcu_read_lock();
2342 if (pid_alive(task)) {
2343 leader = task->group_leader;
2344 get_task_struct(leader);
2346 rcu_read_unlock();
2347 put_task_struct(task);
2348 if (!leader)
2349 goto out_no_task;
2350 retval = 0;
2352 switch (pos) {
2353 case 0:
2354 ino = inode->i_ino;
2355 if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
2356 goto out;
2357 pos++;
2358 /* fall through */
2359 case 1:
2360 ino = parent_ino(dentry);
2361 if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
2362 goto out;
2363 pos++;
2364 /* fall through */
2367 /* f_version caches the tid value that the last readdir call couldn't
2368 * return. lseek aka telldir automagically resets f_version to 0.
2370 tid = filp->f_version;
2371 filp->f_version = 0;
2372 for (task = first_tid(leader, tid, pos - 2);
2373 task;
2374 task = next_tid(task), pos++) {
2375 tid = task->pid;
2376 if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) {
2377 /* returning this tid failed, save it as the first
2378 * pid for the next readdir call */
2379 filp->f_version = tid;
2380 put_task_struct(task);
2381 break;
2384 out:
2385 filp->f_pos = pos;
2386 put_task_struct(leader);
2387 out_no_task:
2388 return retval;
2391 static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
2393 struct inode *inode = dentry->d_inode;
2394 struct task_struct *p = get_proc_task(inode);
2395 generic_fillattr(inode, stat);
2397 if (p) {
2398 rcu_read_lock();
2399 stat->nlink += get_nr_threads(p);
2400 rcu_read_unlock();
2401 put_task_struct(p);
2404 return 0;
2407 static struct inode_operations proc_task_inode_operations = {
2408 .lookup = proc_task_lookup,
2409 .getattr = proc_task_getattr,
2410 .setattr = proc_setattr,
2413 static struct file_operations proc_task_operations = {
2414 .read = generic_read_dir,
2415 .readdir = proc_task_readdir,