Import 2.3.26pre2
[davej-history.git] / fs / proc / array.c
blobb9dde9fd431d4ff67cc7ab1e4f1a971c1dd7dddc
1 /*
2 * linux/fs/proc/array.c
4 * Copyright (C) 1992 by Linus Torvalds
5 * based on ideas by Darren Senn
7 * Fixes:
8 * Michael. K. Johnson: stat,statm extensions.
9 * <johnsonm@stolaf.edu>
11 * Pauline Middelink : Made cmdline,envline only break at '\0's, to
12 * make sure SET_PROCTITLE works. Also removed
13 * bad '!' which forced address recalculation for
14 * EVERY character on the current page.
15 * <middelin@polyware.iaf.nl>
17 * Danny ter Haar : added cpuinfo
18 * <dth@cistron.nl>
20 * Alessandro Rubini : profile extension.
21 * <rubini@ipvvis.unipv.it>
23 * Jeff Tranter : added BogoMips field to cpuinfo
24 * <Jeff_Tranter@Mitel.COM>
26 * Bruno Haible : remove 4K limit for the maps file
27 * <haible@ma2s2.mathematik.uni-karlsruhe.de>
29 * Yves Arrouye : remove removal of trailing spaces in get_array.
30 * <Yves.Arrouye@marin.fdn.fr>
32 * Jerome Forissier : added per-CPU time information to /proc/stat
33 * and /proc/<pid>/cpu extension
34 * <forissier@isia.cma.fr>
35 * - Incorporation and non-SMP safe operation
36 * of forissier patch in 2.1.78 by
37 * Hans Marcus <crowbar@concepts.nl>
39 * aeb@cwi.nl : /proc/partitions
42 * Alan Cox : security fixes.
43 * <Alan.Cox@linux.org>
45 * Al Viro : safe handling of mm_struct
47 * Gerhard Wichert : added BIGMEM support
48 * Siemens AG <Gerhard.Wichert@pdb.siemens.de>
51 #include <linux/types.h>
52 #include <linux/errno.h>
53 #include <linux/sched.h>
54 #include <linux/kernel.h>
55 #include <linux/kernel_stat.h>
56 #include <linux/tty.h>
57 #include <linux/user.h>
58 #include <linux/a.out.h>
59 #include <linux/string.h>
60 #include <linux/mman.h>
61 #include <linux/proc_fs.h>
62 #include <linux/ioport.h>
63 #include <linux/config.h>
64 #include <linux/mm.h>
65 #include <linux/pagemap.h>
66 #include <linux/swap.h>
67 #include <linux/slab.h>
68 #include <linux/smp.h>
69 #include <linux/signal.h>
71 #include <asm/uaccess.h>
72 #include <asm/pgtable.h>
73 #include <asm/io.h>
76 static int open_kcore(struct inode * inode, struct file * filp)
78 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
81 extern ssize_t read_kcore(struct file *, char *, size_t, loff_t *);
83 static struct file_operations proc_kcore_operations = {
84 NULL, /* lseek */
85 read_kcore,
86 NULL, /* write */
87 NULL, /* readdir */
88 NULL, /* poll */
89 NULL, /* ioctl */
90 NULL, /* mmap */
91 open_kcore
94 struct inode_operations proc_kcore_inode_operations = {
95 &proc_kcore_operations,
99 * This function accesses profiling information. The returned data is
100 * binary: the sampling step and the actual contents of the profile
101 * buffer. Use of the program readprofile is recommended in order to
102 * get meaningful info out of these data.
104 static ssize_t read_profile(struct file *file, char *buf,
105 size_t count, loff_t *ppos)
107 unsigned long p = *ppos;
108 ssize_t read;
109 char * pnt;
110 unsigned int sample_step = 1 << prof_shift;
112 if (p >= (prof_len+1)*sizeof(unsigned int))
113 return 0;
114 if (count > (prof_len+1)*sizeof(unsigned int) - p)
115 count = (prof_len+1)*sizeof(unsigned int) - p;
116 read = 0;
118 while (p < sizeof(unsigned int) && count > 0) {
119 put_user(*((char *)(&sample_step)+p),buf);
120 buf++; p++; count--; read++;
122 pnt = (char *)prof_buffer + p - sizeof(unsigned int);
123 copy_to_user(buf,(void *)pnt,count);
124 read += count;
125 *ppos += read;
126 return read;
130 * Writing to /proc/profile resets the counters
132 * Writing a 'profiling multiplier' value into it also re-sets the profiling
133 * interrupt frequency, on architectures that support this.
135 static ssize_t write_profile(struct file * file, const char * buf,
136 size_t count, loff_t *ppos)
138 #ifdef __SMP__
139 extern int setup_profiling_timer (unsigned int multiplier);
141 if (count==sizeof(int)) {
142 unsigned int multiplier;
144 if (copy_from_user(&multiplier, buf, sizeof(int)))
145 return -EFAULT;
147 if (setup_profiling_timer(multiplier))
148 return -EINVAL;
150 #endif
152 memset(prof_buffer, 0, prof_len * sizeof(*prof_buffer));
153 return count;
156 static struct file_operations proc_profile_operations = {
157 NULL, /* lseek */
158 read_profile,
159 write_profile,
162 struct inode_operations proc_profile_inode_operations = {
163 &proc_profile_operations,
166 static struct page * get_phys_addr(struct mm_struct * mm, unsigned long ptr)
168 pgd_t *pgd;
169 pmd_t *pmd;
170 pte_t pte;
172 if (ptr >= TASK_SIZE)
173 return 0;
174 pgd = pgd_offset(mm,ptr);
175 if (pgd_none(*pgd))
176 return 0;
177 if (pgd_bad(*pgd)) {
178 pgd_ERROR(*pgd);
179 pgd_clear(pgd);
180 return 0;
182 pmd = pmd_offset(pgd,ptr);
183 if (pmd_none(*pmd))
184 return 0;
185 if (pmd_bad(*pmd)) {
186 pmd_ERROR(*pmd);
187 pmd_clear(pmd);
188 return 0;
190 pte = *pte_offset(pmd,ptr);
191 if (!pte_present(pte))
192 return 0;
193 return pte_page(pte);
196 static int get_array(struct mm_struct *mm, unsigned long start, unsigned long end, char * buffer)
198 struct page *page;
199 unsigned long kaddr;
200 int size = 0, result = 0;
201 char c;
203 if (start >= end)
204 return result;
205 for (;;) {
206 page = get_phys_addr(mm, start);
207 if (!page)
208 return result;
209 kaddr = kmap(page, KM_READ) + (start & ~PAGE_MASK);
210 do {
211 c = *(char *) kaddr;
212 if (!c)
213 result = size;
214 if (size < PAGE_SIZE)
215 buffer[size++] = c;
216 else {
217 kunmap(kaddr, KM_READ);
218 return result;
220 kaddr++;
221 start++;
222 if (!c && start >= end) {
223 kunmap(kaddr, KM_READ);
224 return result;
226 } while (kaddr & ~PAGE_MASK);
227 kunmap(kaddr, KM_READ);
229 return result;
232 static struct mm_struct *get_mm(int pid)
234 struct task_struct *p;
235 struct mm_struct *mm = NULL;
237 read_lock(&tasklist_lock);
238 p = find_task_by_pid(pid);
239 if (p)
240 mm = p->mm;
241 if (mm)
242 atomic_inc(&mm->mm_users);
243 read_unlock(&tasklist_lock);
244 return mm;
248 static int get_env(int pid, char * buffer)
250 struct mm_struct *mm = get_mm(pid);
251 int res = 0;
252 if (mm) {
253 res = get_array(mm, mm->env_start, mm->env_end, buffer);
254 mmput(mm);
256 return res;
259 static int get_arg(int pid, char * buffer)
261 struct mm_struct *mm = get_mm(pid);
262 int res = 0;
263 if (mm) {
264 res = get_array(mm, mm->arg_start, mm->arg_end, buffer);
265 mmput(mm);
267 return res;
271 * These bracket the sleeping functions..
273 extern void scheduling_functions_start_here(void);
274 extern void scheduling_functions_end_here(void);
275 #define first_sched ((unsigned long) scheduling_functions_start_here)
276 #define last_sched ((unsigned long) scheduling_functions_end_here)
278 static unsigned long get_wchan(struct task_struct *p)
280 if (!p || p == current || p->state == TASK_RUNNING)
281 return 0;
282 #if defined(__i386__)
284 unsigned long ebp, esp, eip;
285 unsigned long stack_page;
286 int count = 0;
288 stack_page = (unsigned long)p;
289 esp = p->thread.esp;
290 if (!stack_page || esp < stack_page || esp > 8188+stack_page)
291 return 0;
292 /* include/asm-i386/system.h:switch_to() pushes ebp last. */
293 ebp = *(unsigned long *) esp;
294 do {
295 if (ebp < stack_page || ebp > 8184+stack_page)
296 return 0;
297 eip = *(unsigned long *) (ebp+4);
298 if (eip < first_sched || eip >= last_sched)
299 return eip;
300 ebp = *(unsigned long *) ebp;
301 } while (count++ < 16);
303 #elif defined(__alpha__)
305 * This one depends on the frame size of schedule(). Do a
306 * "disass schedule" in gdb to find the frame size. Also, the
307 * code assumes that sleep_on() follows immediately after
308 * interruptible_sleep_on() and that add_timer() follows
309 * immediately after interruptible_sleep(). Ugly, isn't it?
310 * Maybe adding a wchan field to task_struct would be better,
311 * after all...
314 unsigned long schedule_frame;
315 unsigned long pc;
317 pc = thread_saved_pc(&p->thread);
318 if (pc >= first_sched && pc < last_sched) {
319 schedule_frame = ((unsigned long *)p->thread.ksp)[6];
320 return ((unsigned long *)schedule_frame)[12];
322 return pc;
324 #elif defined(__mips__)
326 * The same comment as on the Alpha applies here, too ...
329 unsigned long schedule_frame;
330 unsigned long pc;
332 pc = thread_saved_pc(&p->tss);
333 if (pc >= (unsigned long) interruptible_sleep_on && pc < (unsigned long) add_timer) {
334 schedule_frame = ((unsigned long *)(long)p->tss.reg30)[16];
335 return (unsigned long)((unsigned long *)schedule_frame)[11];
337 return pc;
339 #elif defined(__mc68000__)
341 unsigned long fp, pc;
342 unsigned long stack_page;
343 int count = 0;
345 stack_page = (unsigned long)p;
346 fp = ((struct switch_stack *)p->thread.ksp)->a6;
347 do {
348 if (fp < stack_page+sizeof(struct task_struct) ||
349 fp >= 8184+stack_page)
350 return 0;
351 pc = ((unsigned long *)fp)[1];
352 /* FIXME: This depends on the order of these functions. */
353 if (pc < first_sched || pc >= last_sched)
354 return pc;
355 fp = *(unsigned long *) fp;
356 } while (count++ < 16);
358 #elif defined(__powerpc__)
360 unsigned long ip, sp;
361 unsigned long stack_page = (unsigned long) p;
362 int count = 0;
364 sp = p->thread.ksp;
365 do {
366 sp = *(unsigned long *)sp;
367 if (sp < stack_page || sp >= stack_page + 8188)
368 return 0;
369 if (count > 0) {
370 ip = *(unsigned long *)(sp + 4);
371 if (ip < first_sched || ip >= last_sched)
372 return ip;
374 } while (count++ < 16);
376 #elif defined(__arm__)
378 unsigned long fp, lr;
379 unsigned long stack_page;
380 int count = 0;
382 stack_page = 4096 + (unsigned long)p;
383 fp = get_css_fp(&p->thread);
384 do {
385 if (fp < stack_page || fp > 4092+stack_page)
386 return 0;
387 lr = pc_pointer (((unsigned long *)fp)[-1]);
388 if (lr < first_sched || lr > last_sched)
389 return lr;
390 fp = *(unsigned long *) (fp - 12);
391 } while (count ++ < 16);
393 #elif defined (__sparc__)
395 unsigned long pc, fp, bias = 0;
396 unsigned long task_base = (unsigned long) p;
397 struct reg_window *rw;
398 int count = 0;
400 #ifdef __sparc_v9__
401 bias = STACK_BIAS;
402 #endif
403 fp = p->thread.ksp + bias;
404 do {
405 /* Bogus frame pointer? */
406 if (fp < (task_base + sizeof(struct task_struct)) ||
407 fp >= (task_base + (2 * PAGE_SIZE)))
408 break;
409 rw = (struct reg_window *) fp;
410 pc = rw->ins[7];
411 if (pc < first_sched || pc >= last_sched)
412 return pc;
413 fp = rw->ins[6] + bias;
414 } while (++count < 16);
416 #endif
418 return 0;
/*
 * Per-architecture accessors for a task's saved user-mode program
 * counter (KSTK_EIP) and user stack pointer (KSTK_ESP), read from the
 * register save area on the task's kernel stack.  Used by get_stat().
 */
#if defined(__i386__)
/* assumes pt_regs sit at the top of the 2-page kernel stack; the word
   indices 1019/1022 depend on struct pt_regs layout — TODO confirm
   against include/asm-i386/ptrace.h */
# define KSTK_EIP(tsk)  (((unsigned long *)(4096+(unsigned long)(tsk)))[1019])
# define KSTK_ESP(tsk)  (((unsigned long *)(4096+(unsigned long)(tsk)))[1022])
#elif defined(__alpha__)
/*
 * See arch/alpha/kernel/ptrace.c for details.
 */
# define PT_REG(reg)            (PAGE_SIZE - sizeof(struct pt_regs) \
                                 + (long)&((struct pt_regs *)0)->reg)
# define KSTK_EIP(tsk) \
    (*(unsigned long *)(PT_REG(pc) + PAGE_SIZE + (unsigned long)(tsk)))
# define KSTK_ESP(tsk)  ((tsk) == current ? rdusp() : (tsk)->thread.usp)
#elif defined(__arm__)
# ifdef CONFIG_CPU_26
/* 26-bit ARM uses different saved-register slots than 32-bit */
# define KSTK_EIP(tsk)  (((unsigned long *)(4096+(unsigned long)(tsk)))[1022])
# define KSTK_ESP(tsk)  (((unsigned long *)(4096+(unsigned long)(tsk)))[1020])
# else
# define KSTK_EIP(tsk)  (((unsigned long *)(4096+(unsigned long)(tsk)))[1021])
# define KSTK_ESP(tsk)  (((unsigned long *)(4096+(unsigned long)(tsk)))[1019])
# endif
#elif defined(__mc68000__)
/* eip is only valid if esp0 points at a sane, mapped pt_regs frame */
#define KSTK_EIP(tsk)   \
    ({                  \
        unsigned long eip = 0;   \
        if ((tsk)->thread.esp0 > PAGE_SIZE && \
            MAP_NR((tsk)->thread.esp0) < max_mapnr) \
              eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
        eip; })
#define KSTK_ESP(tsk)   ((tsk) == current ? rdusp() : (tsk)->thread.usp)
#elif defined(__powerpc__)
#define KSTK_EIP(tsk)   ((tsk)->thread.regs->nip)
#define KSTK_ESP(tsk)   ((tsk)->thread.regs->gpr[1])
#elif defined (__sparc_v9__)
# define KSTK_EIP(tsk)  ((tsk)->thread.kregs->tpc)
# define KSTK_ESP(tsk)  ((tsk)->thread.kregs->u_regs[UREG_FP])
#elif defined(__sparc__)
# define KSTK_EIP(tsk)  ((tsk)->thread.kregs->pc)
# define KSTK_ESP(tsk)  ((tsk)->thread.kregs->u_regs[UREG_FP])
#elif defined(__mips__)
/* saved registers live just below the top of the kernel stack */
# define PT_REG(reg)            ((long)&((struct pt_regs *)0)->reg \
                                 - sizeof(struct pt_regs))
#define KSTK_TOS(tsk) ((unsigned long)(tsk) + KERNEL_STACK_SIZE - 32)
# define KSTK_EIP(tsk)  (*(unsigned long *)(KSTK_TOS(tsk) + PT_REG(cp0_epc)))
# define KSTK_ESP(tsk)  (*(unsigned long *)(KSTK_TOS(tsk) + PT_REG(regs[29])))
#elif defined(__sh__)
# define KSTK_EIP(tsk)  ((tsk)->thread.pc)
# define KSTK_ESP(tsk)  ((tsk)->thread.sp)
#endif
/* Gcc optimizes away "strlen(x)" for constant x */
/* Append a constant string to the output buffer and advance the
 * pointer; only safe for string literals (strlen is evaluated twice). */
#define ADDBUF(buffer, string) \
do { memcpy(buffer, string, strlen(string)); \
     buffer += strlen(string); } while (0)
475 static inline char * task_name(struct task_struct *p, char * buf)
477 int i;
478 char * name;
480 ADDBUF(buf, "Name:\t");
481 name = p->comm;
482 i = sizeof(p->comm);
483 do {
484 unsigned char c = *name;
485 name++;
486 i--;
487 *buf = c;
488 if (!c)
489 break;
490 if (c == '\\') {
491 buf[1] = c;
492 buf += 2;
493 continue;
495 if (c == '\n') {
496 buf[0] = '\\';
497 buf[1] = 'n';
498 buf += 2;
499 continue;
501 buf++;
502 } while (i);
503 *buf = '\n';
504 return buf+1;
/*
 * The task state array is a strange "bitmap" of
 * reasons to sleep. Thus "running" is zero, and
 * you can test for combinations of others with
 * simple bit tests.
 */
static const char *task_state_array[] = {
        "R (running)",          /*  0 */
        "S (sleeping)",         /*  1 */
        "D (disk sleep)",       /*  2 */
        "Z (zombie)",           /*  4 */
        "T (stopped)",          /*  8 */
        "W (paging)"            /* 16 */
};
522 static inline const char * get_task_state(struct task_struct *tsk)
524 unsigned int state = tsk->state & (TASK_RUNNING |
525 TASK_INTERRUPTIBLE |
526 TASK_UNINTERRUPTIBLE |
527 TASK_ZOMBIE |
528 TASK_STOPPED |
529 TASK_SWAPPING);
530 const char **p = &task_state_array[0];
532 while (state) {
533 p++;
534 state >>= 1;
536 return *p;
539 static inline char * task_state(struct task_struct *p, char *buffer)
541 int g;
543 buffer += sprintf(buffer,
544 "State:\t%s\n"
545 "Pid:\t%d\n"
546 "PPid:\t%d\n"
547 "Uid:\t%d\t%d\t%d\t%d\n"
548 "Gid:\t%d\t%d\t%d\t%d\n"
549 "FDSize:\t%d\n"
550 "Groups:\t",
551 get_task_state(p),
552 p->pid, p->p_pptr->pid,
553 p->uid, p->euid, p->suid, p->fsuid,
554 p->gid, p->egid, p->sgid, p->fsgid,
555 p->files ? p->files->max_fds : 0);
557 for (g = 0; g < p->ngroups; g++)
558 buffer += sprintf(buffer, "%d ", p->groups[g]);
560 buffer += sprintf(buffer, "\n");
561 return buffer;
564 static inline char * task_mem(struct mm_struct *mm, char *buffer)
566 struct vm_area_struct * vma;
567 unsigned long data = 0, stack = 0;
568 unsigned long exec = 0, lib = 0;
570 down(&mm->mmap_sem);
571 for (vma = mm->mmap; vma; vma = vma->vm_next) {
572 unsigned long len = (vma->vm_end - vma->vm_start) >> 10;
573 if (!vma->vm_file) {
574 data += len;
575 if (vma->vm_flags & VM_GROWSDOWN)
576 stack += len;
577 continue;
579 if (vma->vm_flags & VM_WRITE)
580 continue;
581 if (vma->vm_flags & VM_EXEC) {
582 exec += len;
583 if (vma->vm_flags & VM_EXECUTABLE)
584 continue;
585 lib += len;
588 buffer += sprintf(buffer,
589 "VmSize:\t%8lu kB\n"
590 "VmLck:\t%8lu kB\n"
591 "VmRSS:\t%8lu kB\n"
592 "VmData:\t%8lu kB\n"
593 "VmStk:\t%8lu kB\n"
594 "VmExe:\t%8lu kB\n"
595 "VmLib:\t%8lu kB\n",
596 mm->total_vm << (PAGE_SHIFT-10),
597 mm->locked_vm << (PAGE_SHIFT-10),
598 mm->rss << (PAGE_SHIFT-10),
599 data - stack, stack,
600 exec - lib, lib);
601 up(&mm->mmap_sem);
602 return buffer;
605 static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
606 sigset_t *catch)
608 struct k_sigaction *k;
609 int i;
611 sigemptyset(ign);
612 sigemptyset(catch);
614 if (p->sig) {
615 k = p->sig->action;
616 for (i = 1; i <= _NSIG; ++i, ++k) {
617 if (k->sa.sa_handler == SIG_IGN)
618 sigaddset(ign, i);
619 else if (k->sa.sa_handler != SIG_DFL)
620 sigaddset(catch, i);
625 static inline char * task_sig(struct task_struct *p, char *buffer)
627 sigset_t ign, catch;
629 buffer += sprintf(buffer, "SigPnd:\t");
630 buffer = render_sigset_t(&p->signal, buffer);
631 *buffer++ = '\n';
632 buffer += sprintf(buffer, "SigBlk:\t");
633 buffer = render_sigset_t(&p->blocked, buffer);
634 *buffer++ = '\n';
636 collect_sigign_sigcatch(p, &ign, &catch);
637 buffer += sprintf(buffer, "SigIgn:\t");
638 buffer = render_sigset_t(&ign, buffer);
639 *buffer++ = '\n';
640 buffer += sprintf(buffer, "SigCgt:\t"); /* Linux 2.0 uses "SigCgt" */
641 buffer = render_sigset_t(&catch, buffer);
642 *buffer++ = '\n';
644 return buffer;
647 extern inline char *task_cap(struct task_struct *p, char *buffer)
649 return buffer + sprintf(buffer, "CapInh:\t%016x\n"
650 "CapPrm:\t%016x\n"
651 "CapEff:\t%016x\n",
652 cap_t(p->cap_inheritable),
653 cap_t(p->cap_permitted),
654 cap_t(p->cap_effective));
658 static int get_status(int pid, char * buffer)
660 char * orig = buffer;
661 struct task_struct *tsk;
662 struct mm_struct *mm = NULL;
664 read_lock(&tasklist_lock);
665 tsk = find_task_by_pid(pid);
666 if (tsk)
667 mm = tsk->mm;
668 if (mm)
669 atomic_inc(&mm->mm_users);
670 read_unlock(&tasklist_lock); /* FIXME!! This should be done after the last use */
671 if (!tsk)
672 return 0;
673 buffer = task_name(tsk, buffer);
674 buffer = task_state(tsk, buffer);
675 if (mm)
676 buffer = task_mem(mm, buffer);
677 buffer = task_sig(tsk, buffer);
678 buffer = task_cap(tsk, buffer);
679 if (mm)
680 mmput(mm);
681 return buffer - orig;
684 static int get_stat(int pid, char * buffer)
686 struct task_struct *tsk;
687 struct mm_struct *mm = NULL;
688 unsigned long vsize, eip, esp, wchan;
689 long priority, nice;
690 int tty_pgrp;
691 sigset_t sigign, sigcatch;
692 char state;
693 int res;
695 read_lock(&tasklist_lock);
696 tsk = find_task_by_pid(pid);
697 if (tsk)
698 mm = tsk->mm;
699 if (mm)
700 atomic_inc(&mm->mm_users);
701 read_unlock(&tasklist_lock); /* FIXME!! This should be done after the last use */
702 if (!tsk)
703 return 0;
704 state = *get_task_state(tsk);
705 vsize = eip = esp = 0;
706 if (mm) {
707 struct vm_area_struct *vma;
708 down(&mm->mmap_sem);
709 vma = mm->mmap;
710 while (vma) {
711 vsize += vma->vm_end - vma->vm_start;
712 vma = vma->vm_next;
714 eip = KSTK_EIP(tsk);
715 esp = KSTK_ESP(tsk);
716 up(&mm->mmap_sem);
719 wchan = get_wchan(tsk);
721 collect_sigign_sigcatch(tsk, &sigign, &sigcatch);
723 if (tsk->tty)
724 tty_pgrp = tsk->tty->pgrp;
725 else
726 tty_pgrp = -1;
728 /* scale priority and nice values from timeslices to -20..20 */
729 /* to make it look like a "normal" Unix priority/nice value */
730 priority = tsk->counter;
731 priority = 20 - (priority * 10 + DEF_PRIORITY / 2) / DEF_PRIORITY;
732 nice = tsk->priority;
733 nice = 20 - (nice * 20 + DEF_PRIORITY / 2) / DEF_PRIORITY;
735 res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \
736 %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu %lu \
737 %lu %lu %lu %lu %lu %lu %lu %lu %d %d\n",
738 pid,
739 tsk->comm,
740 state,
741 tsk->p_pptr->pid,
742 tsk->pgrp,
743 tsk->session,
744 tsk->tty ? kdev_t_to_nr(tsk->tty->device) : 0,
745 tty_pgrp,
746 tsk->flags,
747 tsk->min_flt,
748 tsk->cmin_flt,
749 tsk->maj_flt,
750 tsk->cmaj_flt,
751 tsk->times.tms_utime,
752 tsk->times.tms_stime,
753 tsk->times.tms_cutime,
754 tsk->times.tms_cstime,
755 priority,
756 nice,
757 0UL /* removed */,
758 tsk->it_real_value,
759 tsk->start_time,
760 vsize,
761 mm ? mm->rss : 0, /* you might want to shift this left 3 */
762 tsk->rlim ? tsk->rlim[RLIMIT_RSS].rlim_cur : 0,
763 mm ? mm->start_code : 0,
764 mm ? mm->end_code : 0,
765 mm ? mm->start_stack : 0,
766 esp,
767 eip,
768 /* The signal information here is obsolete.
769 * It must be decimal for Linux 2.0 compatibility.
770 * Use /proc/#/status for real-time signals.
772 tsk->signal .sig[0] & 0x7fffffffUL,
773 tsk->blocked.sig[0] & 0x7fffffffUL,
774 sigign .sig[0] & 0x7fffffffUL,
775 sigcatch .sig[0] & 0x7fffffffUL,
776 wchan,
777 tsk->nswap,
778 tsk->cnswap,
779 tsk->exit_signal,
780 tsk->processor);
781 if (mm)
782 mmput(mm);
783 return res;
786 static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned long size,
787 int * pages, int * shared, int * dirty, int * total)
789 pte_t * pte;
790 unsigned long end;
792 if (pmd_none(*pmd))
793 return;
794 if (pmd_bad(*pmd)) {
795 pmd_ERROR(*pmd);
796 pmd_clear(pmd);
797 return;
799 pte = pte_offset(pmd, address);
800 address &= ~PMD_MASK;
801 end = address + size;
802 if (end > PMD_SIZE)
803 end = PMD_SIZE;
804 do {
805 pte_t page = *pte;
807 address += PAGE_SIZE;
808 pte++;
809 if (pte_none(page))
810 continue;
811 ++*total;
812 if (!pte_present(page))
813 continue;
814 ++*pages;
815 if (pte_dirty(page))
816 ++*dirty;
817 if (MAP_NR(pte_page(page)) >= max_mapnr)
818 continue;
819 if (page_count(mem_map + MAP_NR(pte_page(page))) > 1)
820 ++*shared;
821 } while (address < end);
824 static inline void statm_pmd_range(pgd_t * pgd, unsigned long address, unsigned long size,
825 int * pages, int * shared, int * dirty, int * total)
827 pmd_t * pmd;
828 unsigned long end;
830 if (pgd_none(*pgd))
831 return;
832 if (pgd_bad(*pgd)) {
833 pgd_ERROR(*pgd);
834 pgd_clear(pgd);
835 return;
837 pmd = pmd_offset(pgd, address);
838 address &= ~PGDIR_MASK;
839 end = address + size;
840 if (end > PGDIR_SIZE)
841 end = PGDIR_SIZE;
842 do {
843 statm_pte_range(pmd, address, end - address, pages, shared, dirty, total);
844 address = (address + PMD_SIZE) & PMD_MASK;
845 pmd++;
846 } while (address < end);
849 static void statm_pgd_range(pgd_t * pgd, unsigned long address, unsigned long end,
850 int * pages, int * shared, int * dirty, int * total)
852 while (address < end) {
853 statm_pmd_range(pgd, address, end - address, pages, shared, dirty, total);
854 address = (address + PGDIR_SIZE) & PGDIR_MASK;
855 pgd++;
859 static int get_statm(int pid, char * buffer)
861 struct mm_struct *mm = get_mm(pid);
862 int size=0, resident=0, share=0, trs=0, lrs=0, drs=0, dt=0;
864 if (mm) {
865 struct vm_area_struct * vma;
866 down(&mm->mmap_sem);
867 vma = mm->mmap;
868 while (vma) {
869 pgd_t *pgd = pgd_offset(mm, vma->vm_start);
870 int pages = 0, shared = 0, dirty = 0, total = 0;
872 statm_pgd_range(pgd, vma->vm_start, vma->vm_end, &pages, &shared, &dirty, &total);
873 resident += pages;
874 share += shared;
875 dt += dirty;
876 size += total;
877 if (vma->vm_flags & VM_EXECUTABLE)
878 trs += pages; /* text */
879 else if (vma->vm_flags & VM_GROWSDOWN)
880 drs += pages; /* stack */
881 else if (vma->vm_end > 0x60000000)
882 lrs += pages; /* library */
883 else
884 drs += pages;
885 vma = vma->vm_next;
887 up(&mm->mmap_sem);
888 mmput(mm);
890 return sprintf(buffer,"%d %d %d %d %d %d %d\n",
891 size, resident, share, trs, lrs, drs, dt);
/*
 * The way we support synthetic files > 4K
 * - without storing their contents in some buffer and
 * - without walking through the entire synthetic file until we reach the
 *   position of the requested data
 * is to cleverly encode the current position in the file's f_pos field.
 * There is no requirement that a read() call which returns `count' bytes
 * of data increases f_pos by exactly `count'.
 *
 * This idea is Linus' one. Bruno implemented it.
 */

/*
 * For the /proc/<pid>/maps file, we use fixed length records, each containing
 * a single line.
 */
#define MAPS_LINE_LENGTH        4096
#define MAPS_LINE_SHIFT         12
/*
 * f_pos = (number of the vma in the task->mm->mmap list) * MAPS_LINE_LENGTH
 *         + (index into the line)
 */
/* for systems with sizeof(void*) == 4: */
#define MAPS_LINE_FORMAT4         "%08lx-%08lx %s %08lx %s %lu"
#define MAPS_LINE_MAX4  49 /* sum of 8  1  8  1 4 1 8 1 5 1 10 1 */
/* for systems with sizeof(void*) == 8: */
#define MAPS_LINE_FORMAT8         "%016lx-%016lx %s %016lx %s %lu"
#define MAPS_LINE_MAX8  73 /* sum of 16  1  16  1 4 1 16 1 5 1 10 1 */

/* upper bound used for buffer arithmetic regardless of pointer size */
#define MAPS_LINE_MAX   MAPS_LINE_MAX8
927 static ssize_t read_maps (int pid, struct file * file, char * buf,
928 size_t count, loff_t *ppos)
930 struct task_struct *p;
931 struct vm_area_struct * map, * next;
932 char * destptr = buf, * buffer;
933 loff_t lineno;
934 ssize_t column, i;
935 int volatile_task;
936 long retval;
939 * We might sleep getting the page, so get it first.
941 retval = -ENOMEM;
942 buffer = (char*)__get_free_page(GFP_KERNEL);
943 if (!buffer)
944 goto out;
946 retval = -EINVAL;
947 read_lock(&tasklist_lock);
948 p = find_task_by_pid(pid);
949 read_unlock(&tasklist_lock); /* FIXME!! This should be done after the last use */
950 if (!p)
951 goto freepage_out;
953 if (!p->mm || count == 0)
954 goto getlen_out;
956 /* Check whether the mmaps could change if we sleep */
957 volatile_task = (p != current || atomic_read(&p->mm->mm_users) > 1);
959 /* decode f_pos */
960 lineno = *ppos >> MAPS_LINE_SHIFT;
961 column = *ppos & (MAPS_LINE_LENGTH-1);
963 /* quickly go to line lineno */
964 for (map = p->mm->mmap, i = 0; map && (i < lineno); map = map->vm_next, i++)
965 continue;
967 for ( ; map ; map = next ) {
968 /* produce the next line */
969 char *line;
970 char str[5], *cp = str;
971 int flags;
972 kdev_t dev;
973 unsigned long ino;
974 int maxlen = (sizeof(void*) == 4) ?
975 MAPS_LINE_MAX4 : MAPS_LINE_MAX8;
976 int len;
979 * Get the next vma now (but it won't be used if we sleep).
981 next = map->vm_next;
982 flags = map->vm_flags;
984 *cp++ = flags & VM_READ ? 'r' : '-';
985 *cp++ = flags & VM_WRITE ? 'w' : '-';
986 *cp++ = flags & VM_EXEC ? 'x' : '-';
987 *cp++ = flags & VM_MAYSHARE ? 's' : 'p';
988 *cp++ = 0;
990 dev = 0;
991 ino = 0;
992 if (map->vm_file != NULL) {
993 dev = map->vm_file->f_dentry->d_inode->i_dev;
994 ino = map->vm_file->f_dentry->d_inode->i_ino;
995 line = d_path(map->vm_file->f_dentry, buffer, PAGE_SIZE);
996 buffer[PAGE_SIZE-1] = '\n';
997 line -= maxlen;
998 if(line < buffer)
999 line = buffer;
1000 } else
1001 line = buffer;
1003 len = sprintf(line,
1004 sizeof(void*) == 4 ? MAPS_LINE_FORMAT4 : MAPS_LINE_FORMAT8,
1005 map->vm_start, map->vm_end, str, map->vm_pgoff << PAGE_SHIFT,
1006 kdevname(dev), ino);
1008 if(map->vm_file) {
1009 for(i = len; i < maxlen; i++)
1010 line[i] = ' ';
1011 len = buffer + PAGE_SIZE - line;
1012 } else
1013 line[len++] = '\n';
1014 if (column >= len) {
1015 column = 0; /* continue with next line at column 0 */
1016 lineno++;
1017 continue; /* we haven't slept */
1020 i = len-column;
1021 if (i > count)
1022 i = count;
1023 copy_to_user(destptr, line+column, i); /* may have slept */
1024 destptr += i;
1025 count -= i;
1026 column += i;
1027 if (column >= len) {
1028 column = 0; /* next time: next line at column 0 */
1029 lineno++;
1032 /* done? */
1033 if (count == 0)
1034 break;
1036 /* By writing to user space, we might have slept.
1037 * Stop the loop, to avoid a race condition.
1039 if (volatile_task)
1040 break;
1043 /* encode f_pos */
1044 *ppos = (lineno << MAPS_LINE_SHIFT) + column;
1046 getlen_out:
1047 retval = destptr - buf;
1049 freepage_out:
1050 free_page((unsigned long)buffer);
1051 out:
1052 return retval;
#ifdef __SMP__
/*
 * Produce /proc/<pid>/cpu: aggregate utime/stime followed by one line
 * per logical CPU.  Returns bytes written, or 0 if the task is gone.
 */
static int get_pidcpu(int pid, char * buffer)
{
        struct task_struct * tsk = current;
        int cpu, len;

        read_lock(&tasklist_lock);
        if (pid != tsk->pid)
                tsk = find_task_by_pid(pid);
        read_unlock(&tasklist_lock);    /* FIXME!! This should be done after the last use */

        if (tsk == NULL)
                return 0;

        len = sprintf(buffer,
                      "cpu %lu %lu\n",
                      tsk->times.tms_utime,
                      tsk->times.tms_stime);

        for (cpu = 0; cpu < smp_num_cpus; cpu++)
                len += sprintf(buffer + len, "cpu%d %lu %lu\n",
                               cpu,
                               tsk->per_cpu_utime[cpu_logical_map(cpu)],
                               tsk->per_cpu_stime[cpu_logical_map(cpu)]);

        return len;
}
#endif
1084 static int process_unauthorized(int type, int pid)
1086 struct task_struct *p;
1087 uid_t euid=0; /* Save the euid keep the lock short */
1088 int ok = 0;
1090 read_lock(&tasklist_lock);
1093 * Grab the lock, find the task, save the uid and
1094 * check it has an mm still (ie its not dead)
1097 p = find_task_by_pid(pid);
1098 if (p) {
1099 euid=p->euid;
1100 ok = p->dumpable;
1101 if(!cap_issubset(p->cap_permitted, current->cap_permitted))
1102 ok=0;
1105 read_unlock(&tasklist_lock);
1107 if (!p)
1108 return 1;
1110 switch(type) {
1111 case PROC_PID_STATUS:
1112 case PROC_PID_STATM:
1113 case PROC_PID_STAT:
1114 case PROC_PID_MAPS:
1115 case PROC_PID_CMDLINE:
1116 case PROC_PID_CPU:
1117 return 0;
1119 if(capable(CAP_DAC_OVERRIDE) || (current->fsuid == euid && ok))
1120 return 0;
1121 return 1;
1125 static inline int get_process_array(char * page, int pid, int type)
1127 switch (type) {
1128 case PROC_PID_STATUS:
1129 return get_status(pid, page);
1130 case PROC_PID_ENVIRON:
1131 return get_env(pid, page);
1132 case PROC_PID_CMDLINE:
1133 return get_arg(pid, page);
1134 case PROC_PID_STAT:
1135 return get_stat(pid, page);
1136 case PROC_PID_STATM:
1137 return get_statm(pid, page);
1138 #ifdef __SMP__
1139 case PROC_PID_CPU:
1140 return get_pidcpu(pid, page);
1141 #endif
1143 return -EBADF;
1146 #define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */
1148 static ssize_t array_read(struct file * file, char * buf,
1149 size_t count, loff_t *ppos)
1151 struct inode * inode = file->f_dentry->d_inode;
1152 unsigned long page;
1153 char *start;
1154 ssize_t length;
1155 ssize_t end;
1156 unsigned int type, pid;
1157 struct proc_dir_entry *dp;
1159 if (count > PROC_BLOCK_SIZE)
1160 count = PROC_BLOCK_SIZE;
1161 if (!(page = __get_free_page(GFP_KERNEL)))
1162 return -ENOMEM;
1163 type = inode->i_ino;
1164 pid = type >> 16;
1165 type &= 0x0000ffff;
1166 start = NULL;
1167 dp = (struct proc_dir_entry *) inode->u.generic_ip;
1169 if (!pid) { /* can't happen */
1170 free_page(page);
1171 return -EBADF;
1174 if (process_unauthorized(type, pid)) {
1175 free_page(page);
1176 return -EIO;
1179 length = get_process_array((char *) page, pid, type);
1180 if (length < 0) {
1181 free_page(page);
1182 return length;
1184 /* Static 4kB (or whatever) block capacity */
1185 if (*ppos >= length) {
1186 free_page(page);
1187 return 0;
1189 if (count + *ppos > length)
1190 count = length - *ppos;
1191 end = count + *ppos;
1192 copy_to_user(buf, (char *) page + *ppos, count);
1193 *ppos = end;
1194 free_page(page);
1195 return count;
1198 static struct file_operations proc_array_operations = {
1199 NULL, /* array_lseek */
1200 array_read,
1201 NULL, /* array_write */
1202 NULL, /* array_readdir */
1203 NULL, /* array_poll */
1204 NULL, /* array_ioctl */
1205 NULL, /* mmap */
1206 NULL, /* no special open code */
1207 NULL, /* flush */
1208 NULL, /* no special release code */
1209 NULL /* can't fsync */
1212 struct inode_operations proc_array_inode_operations = {
1213 &proc_array_operations, /* default base directory file-ops */
1214 NULL, /* create */
1215 NULL, /* lookup */
1216 NULL, /* link */
1217 NULL, /* unlink */
1218 NULL, /* symlink */
1219 NULL, /* mkdir */
1220 NULL, /* rmdir */
1221 NULL, /* mknod */
1222 NULL, /* rename */
1223 NULL, /* readlink */
1224 NULL, /* follow_link */
1225 NULL, /* get_block */
1226 NULL, /* readpage */
1227 NULL, /* writepage */
1228 NULL, /* flushpage */
1229 NULL, /* truncate */
1230 NULL, /* permission */
1231 NULL, /* smap */
1232 NULL /* revalidate */
1235 static ssize_t arraylong_read(struct file * file, char * buf,
1236 size_t count, loff_t *ppos)
1238 struct inode * inode = file->f_dentry->d_inode;
1239 unsigned int pid = inode->i_ino >> 16;
1240 unsigned int type = inode->i_ino & 0x0000ffff;
1242 switch (type) {
1243 case PROC_PID_MAPS:
1244 return read_maps(pid, file, buf, count, ppos);
1246 return -EINVAL;
1249 static struct file_operations proc_arraylong_operations = {
1250 NULL, /* array_lseek */
1251 arraylong_read,
1252 NULL, /* array_write */
1253 NULL, /* array_readdir */
1254 NULL, /* array_poll */
1255 NULL, /* array_ioctl */
1256 NULL, /* mmap */
1257 NULL, /* no special open code */
1258 NULL, /* flush */
1259 NULL, /* no special release code */
1260 NULL /* can't fsync */
1263 struct inode_operations proc_arraylong_inode_operations = {
1264 &proc_arraylong_operations, /* default base directory file-ops */
1265 NULL, /* create */
1266 NULL, /* lookup */
1267 NULL, /* link */
1268 NULL, /* unlink */
1269 NULL, /* symlink */
1270 NULL, /* mkdir */
1271 NULL, /* rmdir */
1272 NULL, /* mknod */
1273 NULL, /* rename */
1274 NULL, /* readlink */
1275 NULL, /* follow_link */
1276 NULL, /* get_block */
1277 NULL, /* readpage */
1278 NULL, /* writepage */
1279 NULL, /* flushpage */
1280 NULL, /* truncate */
1281 NULL, /* permission */
1282 NULL, /* smap */
1283 NULL /* revalidate */