2 * linux/fs/proc/array.c
4 * Copyright (C) 1992 by Linus Torvalds
5 * based on ideas by Darren Senn
8 * Michael. K. Johnson: stat,statm extensions.
9 * <johnsonm@stolaf.edu>
11 * Pauline Middelink : Made cmdline,envline only break at '\0's, to
12 * make sure SET_PROCTITLE works. Also removed
13 * bad '!' which forced address recalculation for
14 * EVERY character on the current page.
15 * <middelin@polyware.iaf.nl>
17 * Danny ter Haar : added cpuinfo
20 * Alessandro Rubini : profile extension.
21 * <rubini@ipvvis.unipv.it>
23 * Jeff Tranter : added BogoMips field to cpuinfo
24 * <Jeff_Tranter@Mitel.COM>
26 * Bruno Haible : remove 4K limit for the maps file
27 * <haible@ma2s2.mathematik.uni-karlsruhe.de>
29 * Yves Arrouye : remove removal of trailing spaces in get_array.
30 * <Yves.Arrouye@marin.fdn.fr>
33 #include <linux/types.h>
34 #include <linux/errno.h>
35 #include <linux/sched.h>
36 #include <linux/kernel.h>
37 #include <linux/kernel_stat.h>
38 #include <linux/tty.h>
39 #include <linux/user.h>
40 #include <linux/a.out.h>
41 #include <linux/string.h>
42 #include <linux/mman.h>
43 #include <linux/proc_fs.h>
44 #include <linux/ioport.h>
45 #include <linux/config.h>
47 #include <linux/pagemap.h>
48 #include <linux/swap.h>
49 #include <linux/slab.h>
50 #include <linux/smp.h>
51 #include <linux/signal.h>
53 #include <asm/uaccess.h>
54 #include <asm/pgtable.h>
/*
 * Fixed-point load-average helpers: avenrun[] values carry FSHIFT
 * fractional bits.  LOAD_INT extracts the integer part; LOAD_FRAC
 * scales the fractional bits to a two-digit decimal for printing.
 */
57 #define LOAD_INT(x) ((x) >> FSHIFT)
58 #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
60 #ifdef CONFIG_DEBUG_MALLOC
/* Forward declaration; the definition lives in the debug-malloc code. */
61 int get_malloc(char * buffer
);
/*
 * read_core — read() handler for /proc/kcore: emits a zero-filled
 * `struct user` header followed by physical memory (mapped at
 * PAGE_OFFSET) so debuggers can treat it like a core file.
 * NOTE(review): this copy of the file is missing lines (the embedded
 * numbering jumps), so the fragments below are not the complete body.
 */
65 static ssize_t
read_core(struct file
* file
, char * buf
,
66 size_t count
, loff_t
*ppos
)
68 unsigned long p
= *ppos
, memsize
;
/* Page 0 is not mapped on some architectures, so skip it below. */
73 #if defined (__i386__) || defined (__mc68000__)
74 # define FIRST_MAPPED PAGE_SIZE /* we don't have page 0 mapped on x86.. */
76 # define FIRST_MAPPED 0
/* Build the fake `struct user` header served at the start of the file. */
79 memset(&dump
, 0, sizeof(struct user
));
81 dump
.u_dsize
= max_mapnr
;
83 dump
.start_data
= PAGE_OFFSET
;
86 memsize
= (max_mapnr
+ 1) << PAGE_SHIFT
;
/* Clamp the request so we never read past the end of memory. */
89 if (count
> memsize
- p
)
/* Copy the portion of the header that overlaps the request. */
93 if (p
< sizeof(struct user
) && count
> 0) {
95 if (p
+ count1
> sizeof(struct user
))
96 count1
= sizeof(struct user
)-p
;
97 pnt
= (char *) &dump
+ p
;
98 copy_to_user(buf
,(void *) pnt
, count1
);
/* Unmapped low pages read back as zeroes. */
105 if (count
> 0 && p
< PAGE_SIZE
+ FIRST_MAPPED
) {
106 count1
= PAGE_SIZE
+ FIRST_MAPPED
- p
;
109 clear_user(buf
, count1
);
/* Remaining bytes come straight from the kernel direct mapping. */
116 copy_to_user(buf
, (void *) (PAGE_OFFSET
+p
-PAGE_SIZE
), count
);
/* Operations tables wiring read_core into the /proc/kcore inode. */
123 static struct file_operations proc_kcore_operations
= {
128 struct inode_operations proc_kcore_inode_operations
= {
129 &proc_kcore_operations
,
/*
 * /proc/profile read/write handlers.
 * NOTE(review): this copy of the file is missing lines (the embedded
 * numbering jumps), so the fragments below are not the complete bodies.
 */
133 * This function accesses profiling information. The returned data is
134 * binary: the sampling step and the actual contents of the profile
135 * buffer. Use of the program readprofile is recommended in order to
136 * get meaningful info out of these data.
138 static ssize_t
read_profile(struct file
*file
, char *buf
,
139 size_t count
, loff_t
*ppos
)
141 unsigned long p
= *ppos
;
144 unsigned int sample_step
= 1 << prof_shift
;
/* EOF once past the sample-step word plus the whole profile buffer. */
146 if (p
>= (prof_len
+1)*sizeof(unsigned int))
148 if (count
> (prof_len
+1)*sizeof(unsigned int) - p
)
149 count
= (prof_len
+1)*sizeof(unsigned int) - p
;
/* First sizeof(unsigned int) bytes of the file are the sample step,
 * copied out one byte at a time to honor arbitrary offsets. */
152 while (p
< sizeof(unsigned int) && count
> 0) {
153 put_user(*((char *)(&sample_step
)+p
),buf
);
154 buf
++; p
++; count
--; read
++;
156 pnt
= (char *)prof_buffer
+ p
- sizeof(unsigned int);
157 copy_to_user(buf
,(void *)pnt
,count
);
164 * Writing to /proc/profile resets the counters
166 * Writing a 'profiling multiplier' value into it also re-sets the profiling
167 * interrupt frequency, on architectures that support this.
169 static ssize_t
write_profile(struct file
* file
, const char * buf
,
170 size_t count
, loff_t
*ppos
)
173 extern int setup_profiling_timer (unsigned int multiplier
);
/* An exactly int-sized write is interpreted as a new multiplier. */
175 if (count
==sizeof(int)) {
176 unsigned int multiplier
;
178 if (copy_from_user(&multiplier
, buf
, sizeof(int)))
181 if (setup_profiling_timer(multiplier
))
/* Any write clears the accumulated profile counters. */
186 memset(prof_buffer
, 0, prof_len
* sizeof(*prof_buffer
));
/* Operations tables wiring the handlers into the /proc/profile inode. */
190 static struct file_operations proc_profile_operations
= {
196 struct inode_operations proc_profile_inode_operations
= {
197 &proc_profile_operations
,
/*
 * get_loadavg — format /proc/loadavg: the 1/5/15-minute load averages,
 * running/total task counts, and the last pid assigned.
 * FIXED_1/200 rounds each fixed-point average to the nearest hundredth
 * before LOAD_INT/LOAD_FRAC split it for printing.
 */
201 static int get_loadavg(char * buffer
)
205 a
= avenrun
[0] + (FIXED_1
/200);
206 b
= avenrun
[1] + (FIXED_1
/200);
207 c
= avenrun
[2] + (FIXED_1
/200);
208 return sprintf(buffer
,"%d.%02d %d.%02d %d.%02d %d/%d %d\n",
209 LOAD_INT(a
), LOAD_FRAC(a
),
210 LOAD_INT(b
), LOAD_FRAC(b
),
211 LOAD_INT(c
), LOAD_FRAC(c
),
212 nr_running
, nr_tasks
, last_pid
);
/*
 * get_kstat — format /proc/stat: aggregate CPU time, per-drive disk I/O
 * counters, per-IRQ interrupt counts, and boot time.
 * NOTE(review): this copy of the file is missing lines (the embedded
 * numbering jumps); the format strings and several counters that the
 * sprintf arguments below correspond to are not all visible here.
 */
215 static int get_kstat(char * buffer
)
219 extern unsigned long total_forks
;
/* Total ticks elapsed across all CPUs since boot. */
222 ticks
= jiffies
* smp_num_cpus
;
223 for (i
= 0 ; i
< NR_IRQS
; i
++)
224 sum
+= kstat
.interrupts
[i
];
225 len
= sprintf(buffer
,
228 "disk_rio %u %u %u %u\n"
229 "disk_wio %u %u %u %u\n"
230 "disk_rblk %u %u %u %u\n"
231 "disk_wblk %u %u %u %u\n"
/* Idle time is whatever is left after user, nice and system time. */
238 ticks
- (kstat
.cpu_user
+ kstat
.cpu_nice
+ kstat
.cpu_system
),
239 kstat
.dk_drive
[0], kstat
.dk_drive
[1],
240 kstat
.dk_drive
[2], kstat
.dk_drive
[3],
241 kstat
.dk_drive_rio
[0], kstat
.dk_drive_rio
[1],
242 kstat
.dk_drive_rio
[2], kstat
.dk_drive_rio
[3],
243 kstat
.dk_drive_wio
[0], kstat
.dk_drive_wio
[1],
244 kstat
.dk_drive_wio
[2], kstat
.dk_drive_wio
[3],
245 kstat
.dk_drive_rblk
[0], kstat
.dk_drive_rblk
[1],
246 kstat
.dk_drive_rblk
[2], kstat
.dk_drive_rblk
[3],
247 kstat
.dk_drive_wblk
[0], kstat
.dk_drive_wblk
[1],
248 kstat
.dk_drive_wblk
[2], kstat
.dk_drive_wblk
[3],
/* One count per IRQ line, appended to the "intr" row. */
254 for (i
= 0 ; i
< NR_IRQS
; i
++)
255 len
+= sprintf(buffer
+ len
, " %u", kstat
.interrupts
[i
]);
256 len
+= sprintf(buffer
+ len
,
/* Boot time: wall-clock now minus seconds of uptime. */
261 xtime
.tv_sec
- jiffies
/ HZ
,
/*
 * get_uptime / get_meminfo / get_version / get_cmdline — simple
 * one-shot formatters for /proc/uptime, /proc/meminfo, /proc/version
 * and /proc/cmdline respectively.
 * NOTE(review): this copy of the file is missing lines (the embedded
 * numbering jumps), so the fragments below are not the complete bodies.
 */
267 static int get_uptime(char * buffer
)
269 unsigned long uptime
;
/* System idle time == cumulative CPU time of the idle task (pid 0). */
273 idle
= task
[0]->times
.tms_utime
+ task
[0]->times
.tms_stime
;
275 /* The formula for the fraction parts really is ((t * 100) / HZ) % 100, but
276 that would overflow about every five days at HZ == 100.
277 Therefore the identity a = (a / b) * b + a % b is used so that it is
278 calculated as (((t / HZ) * 100) + ((t % HZ) * 100) / HZ) % 100.
279 The part in front of the '+' always evaluates as 0 (mod 100). All divisions
280 in the above formulas are truncating. For HZ being a power of 10, the
281 calculations simplify to the version in the #else part (if the printf
282 format is adapted to the same number of digits as zeroes in HZ.
285 return sprintf(buffer
,"%lu.%02lu %lu.%02lu\n",
287 (((uptime
% HZ
) * 100) / HZ
) % 100,
289 (((idle
% HZ
) * 100) / HZ
) % 100);
/* Simpler variant used when HZ divides 100 evenly (see comment above). */
291 return sprintf(buffer
,"%lu.%02lu %lu.%02lu\n",
299 static int get_meminfo(char * buffer
)
/* Legacy column layout first, kept for old tools. */
306 len
= sprintf(buffer
, " total: used: free: shared: buffers: cached:\n"
307 "Mem: %8lu %8lu %8lu %8lu %8lu %8lu\n"
308 "Swap: %8lu %8lu %8lu\n",
309 i
.totalram
, i
.totalram
-i
.freeram
, i
.freeram
, i
.sharedram
, i
.bufferram
, page_cache_size
*PAGE_SIZE
,
310 i
.totalswap
, i
.totalswap
-i
.freeswap
, i
.freeswap
);
312 * Tagged format, for easy grepping and expansion. The above will go away
313 * eventually, once the tools have been updated.
315 return len
+ sprintf(buffer
+len
,
316 "MemTotal: %8lu kB\n"
318 "MemShared: %8lu kB\n"
321 "SwapTotal: %8lu kB\n"
322 "SwapFree: %8lu kB\n",
/* PAGE_SHIFT - 10 converts a page count to kilobytes. */
327 page_cache_size
<< (PAGE_SHIFT
- 10),
332 static int get_version(char * buffer
)
334 extern char *linux_banner
;
336 strcpy(buffer
, linux_banner
);
337 return strlen(buffer
);
340 static int get_cmdline(char * buffer
)
342 extern char saved_command_line
[];
344 return sprintf(buffer
, "%s\n", saved_command_line
);
/*
 * get_phys_addr — walk a task's page tables (pgd -> pmd -> pte) and
 * translate a user virtual address into a physical address, returning
 * 0-ish failure paths (not all visible here) for unmapped addresses.
 * get_array / get_env / get_arg use it to copy NUL-separated string
 * ranges (environment, command line) out of another process.
 * NOTE(review): this copy of the file is missing lines (the embedded
 * numbering jumps), so the fragments below are not the complete bodies.
 */
347 static unsigned long get_phys_addr(struct task_struct
* p
, unsigned long ptr
)
/* Reject kernel addresses and tasks without an mm. */
353 if (!p
|| !p
->mm
|| ptr
>= TASK_SIZE
)
355 /* Check for NULL pgd .. shouldn't happen! */
357 printk("get_phys_addr: pid %d has NULL pgd!\n", p
->pid
);
361 page_dir
= pgd_offset(p
->mm
,ptr
);
362 if (pgd_none(*page_dir
))
364 if (pgd_bad(*page_dir
)) {
365 printk("bad page directory entry %08lx\n", pgd_val(*page_dir
));
369 page_middle
= pmd_offset(page_dir
,ptr
);
370 if (pmd_none(*page_middle
))
372 if (pmd_bad(*page_middle
)) {
373 printk("bad page middle entry %08lx\n", pmd_val(*page_middle
));
374 pmd_clear(page_middle
);
377 pte
= *pte_offset(page_middle
,ptr
);
378 if (!pte_present(pte
))
/* Physical page base plus the offset within the page. */
380 return pte_page(pte
) + (ptr
& ~PAGE_MASK
);
383 static int get_array(struct task_struct
*p
, unsigned long start
, unsigned long end
, char * buffer
)
386 int size
= 0, result
= 0;
392 addr
= get_phys_addr(p
, start
);
399 if (size
< PAGE_SIZE
)
/* Stop at a NUL once past the end of the requested range. */
405 if (!c
&& start
>= end
)
/* Keep going while still inside the current physical page. */
407 } while (addr
& ~PAGE_MASK
);
412 static int get_env(int pid
, char * buffer
)
414 struct task_struct
*p
= find_task_by_pid(pid
);
418 return get_array(p
, p
->mm
->env_start
, p
->mm
->env_end
, buffer
);
421 static int get_arg(int pid
, char * buffer
)
423 struct task_struct
*p
= find_task_by_pid(pid
);
427 return get_array(p
, p
->mm
->arg_start
, p
->mm
->arg_end
, buffer
);
/*
 * get_wchan — per-architecture stack walk that finds the kernel
 * function a sleeping task is blocked in (its "wait channel"): it
 * unwinds saved frame pointers until it leaves the scheduler code
 * region bracketed by first_sched/last_sched.
 * The KSTK_EIP/KSTK_ESP macros below fish the user-mode pc/sp out of
 * each architecture's saved register layout.
 * NOTE(review): this copy of the file is missing lines (the embedded
 * numbering jumps), so the fragments below are not the complete bodies.
 */
431 * These bracket the sleeping functions..
433 extern void scheduling_functions_start_here(void);
434 extern void scheduling_functions_end_here(void);
435 #define first_sched ((unsigned long) scheduling_functions_start_here)
436 #define last_sched ((unsigned long) scheduling_functions_end_here)
438 static unsigned long get_wchan(struct task_struct
*p
)
/* A running task (or ourselves) has no meaningful wait channel. */
440 if (!p
|| p
== current
|| p
->state
== TASK_RUNNING
)
442 #if defined(__i386__)
444 unsigned long ebp
, eip
;
445 unsigned long stack_page
;
/* The kernel stack lives in the 4K page following the task_struct. */
448 stack_page
= 4096 + (unsigned long)p
;
/* Sanity-check the saved frame pointer stays on the stack page. */
453 if (ebp
< stack_page
|| ebp
>= 4092+stack_page
)
455 eip
= *(unsigned long *) (ebp
+4);
456 if (eip
< first_sched
|| eip
>= last_sched
)
458 ebp
= *(unsigned long *) ebp
;
/* Bound the unwind so a corrupt chain cannot loop forever. */
459 } while (count
++ < 16);
461 #elif defined(__alpha__)
463 * This one depends on the frame size of schedule(). Do a
464 * "disass schedule" in gdb to find the frame size. Also, the
465 * code assumes that sleep_on() follows immediately after
466 * interruptible_sleep_on() and that add_timer() follows
467 * immediately after interruptible_sleep(). Ugly, isn't it?
468 * Maybe adding a wchan field to task_struct would be better,
472 unsigned long schedule_frame
;
475 pc
= thread_saved_pc(&p
->tss
);
476 if (pc
>= first_sched
&& pc
< last_sched
) {
477 schedule_frame
= ((unsigned long *)p
->tss
.ksp
)[6];
478 return ((unsigned long *)schedule_frame
)[12];
482 #elif defined(__mc68000__)
484 unsigned long fp
, pc
;
485 unsigned long stack_page
;
487 extern int sys_pause (void);
489 stack_page
= p
->kernel_stack_page
;
492 fp
= ((struct switch_stack
*)p
->tss
.ksp
)->a6
;
494 if (fp
< stack_page
|| fp
>= 4088+stack_page
)
496 pc
= ((unsigned long *)fp
)[1];
497 /* FIXME: This depends on the order of these functions. */
498 if (pc
< first_sched
|| pc
>= last_sched
)
500 fp
= *(unsigned long *) fp
;
501 } while (count
++ < 16);
503 #elif defined(__powerpc__)
/* PowerPC keeps the wchan directly in the thread state. */
504 return (p
->tss
.wchan
);
/* Per-architecture accessors for a task's user-mode pc and sp. */
509 #if defined(__i386__)
510 # define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1019])
511 # define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1022])
512 #elif defined(__alpha__)
514 * See arch/alpha/kernel/ptrace.c for details.
516 # define PT_REG(reg) (PAGE_SIZE - sizeof(struct pt_regs) \
517 + (long)&((struct pt_regs *)0)->reg)
518 # define KSTK_EIP(tsk) \
519 (*(unsigned long *)(PT_REG(pc) + PAGE_SIZE + (unsigned long)(tsk)))
520 # define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->tss.usp)
521 #elif defined(__mc68000__)
522 #define KSTK_EIP(tsk) \
524 unsigned long eip = 0; \
525 if ((tsk)->tss.esp0 > PAGE_SIZE && \
526 MAP_NR((tsk)->tss.esp0) < max_mapnr) \
527 eip = ((struct pt_regs *) (tsk)->tss.esp0)->pc; \
529 #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->tss.usp)
530 #elif defined(__powerpc__)
531 #define KSTK_EIP(tsk) ((tsk)->tss.regs->nip)
532 #define KSTK_ESP(tsk) ((tsk)->tss.regs->gpr[1])
533 #elif defined (__sparc_v9__)
534 # define KSTK_EIP(tsk) ((tsk)->tss.kregs->tpc)
535 # define KSTK_ESP(tsk) ((tsk)->tss.kregs->u_regs[UREG_FP])
536 #elif defined(__sparc__)
537 # define KSTK_EIP(tsk) ((tsk)->tss.kregs->pc)
538 # define KSTK_ESP(tsk) ((tsk)->tss.kregs->u_regs[UREG_FP])
541 /* Gcc optimizes away "strlen(x)" for constant x */
542 #define ADDBUF(buffer, string) \
543 do { memcpy(buffer, string, strlen(string)); \
544 buffer += strlen(string); } while (0)
/*
 * task_name / task_state / task_mem — append the "Name:", state/uid/gid
 * and VM-size sections of /proc/<pid>/status to `buffer`, each returning
 * the advanced buffer pointer so the sections can be chained.
 * render_sigset_t prints a sigset as hex digits, 4 signals per nibble.
 * NOTE(review): this copy of the file is missing lines (the embedded
 * numbering jumps), so the fragments below are not the complete bodies.
 */
546 static inline char * task_name(struct task_struct
*p
, char * buf
)
551 ADDBUF(buf
, "Name:\t");
555 unsigned char c
= *name
;
578 static inline char * task_state(struct task_struct
*p
, char *buffer
)
580 #define NR_STATES (sizeof(states)/sizeof(const char *))
581 unsigned int n
= p
->state
;
582 static const char * states
[] = {
595 buffer
+= sprintf(buffer
,
599 "Uid:\t%d\t%d\t%d\t%d\n"
600 "Gid:\t%d\t%d\t%d\t%d\n",
602 p
->pid
, p
->p_pptr
->pid
,
/* real, effective, saved and filesystem ids, in that order. */
603 p
->uid
, p
->euid
, p
->suid
, p
->fsuid
,
604 p
->gid
, p
->egid
, p
->sgid
, p
->fsgid
);
608 static inline char * task_mem(struct task_struct
*p
, char *buffer
)
610 struct mm_struct
* mm
= p
->mm
;
/* Kernel threads share init_mm and report no VM statistics. */
612 if (mm
&& mm
!= &init_mm
) {
613 struct vm_area_struct
* vma
= mm
->mmap
;
614 unsigned long data
= 0, stack
= 0;
615 unsigned long exec
= 0, lib
= 0;
/* Classify each vma as data/stack/exec/lib by its flags. */
617 for (vma
= mm
->mmap
; vma
; vma
= vma
->vm_next
) {
618 unsigned long len
= (vma
->vm_end
- vma
->vm_start
) >> 10;
619 if (!vma
->vm_dentry
) {
621 if (vma
->vm_flags
& VM_GROWSDOWN
)
625 if (vma
->vm_flags
& VM_WRITE
)
627 if (vma
->vm_flags
& VM_EXEC
) {
629 if (vma
->vm_flags
& VM_EXECUTABLE
)
634 buffer
+= sprintf(buffer
,
/* Page counts converted to kB via PAGE_SHIFT - 10. */
642 mm
->total_vm
<< (PAGE_SHIFT
-10),
643 mm
->locked_vm
<< (PAGE_SHIFT
-10),
644 mm
->rss
<< (PAGE_SHIFT
-10),
651 char * render_sigset_t(sigset_t
*set
, char *buffer
)
/* Pack four consecutive signals into one hex digit. */
656 if (sigismember(set
, i
+1)) x
|= 1;
657 if (sigismember(set
, i
+2)) x
|= 2;
658 if (sigismember(set
, i
+3)) x
|= 4;
659 if (sigismember(set
, i
+4)) x
|= 8;
660 *buffer
++ = (x
< 10 ? '0' : 'a' - 10) + x
;
/*
 * collect_sigign_sigcatch — scan a task's signal handler table and
 * build the sets of ignored (SIG_IGN) and caught (neither SIG_IGN nor
 * SIG_DFL) signals.  task_sig renders pending/blocked/ignored/caught
 * sets into /proc/<pid>/status lines, and get_status chains all the
 * section formatters to produce the whole status file.
 * NOTE(review): this copy of the file is missing lines (the embedded
 * numbering jumps), so the fragments below are not the complete bodies.
 */
666 static void collect_sigign_sigcatch(struct task_struct
*p
, sigset_t
*ign
,
669 struct k_sigaction
*k
;
/* Signal numbers are 1-based; walk all _NSIG actions. */
677 for (i
= 1; i
<= _NSIG
; ++i
, ++k
) {
678 if (k
->sa
.sa_handler
== SIG_IGN
)
680 else if (k
->sa
.sa_handler
!= SIG_DFL
)
686 static inline char * task_sig(struct task_struct
*p
, char *buffer
)
690 buffer
+= sprintf(buffer
, "SigPnd:\t");
691 buffer
= render_sigset_t(&p
->signal
, buffer
);
693 buffer
+= sprintf(buffer
, "SigBlk:\t");
694 buffer
= render_sigset_t(&p
->blocked
, buffer
);
697 collect_sigign_sigcatch(p
, &ign
, &catch);
698 buffer
+= sprintf(buffer
, "SigIgn:\t");
699 buffer
= render_sigset_t(&ign
, buffer
);
701 buffer
+= sprintf(buffer
, "SigCat:\t");
702 buffer
= render_sigset_t(&catch, buffer
);
708 static int get_status(int pid
, char * buffer
)
710 char * orig
= buffer
;
711 struct task_struct
*tsk
= find_task_by_pid(pid
);
/* Each helper appends its section and returns the new end pointer. */
715 buffer
= task_name(tsk
, buffer
);
716 buffer
= task_state(tsk
, buffer
);
717 buffer
= task_mem(tsk
, buffer
);
718 buffer
= task_sig(tsk
, buffer
);
719 return buffer
- orig
;
/*
 * get_stat — format the single machine-readable line of
 * /proc/<pid>/stat: pid, command, state char, ids, timings, memory
 * bounds, signal masks and wait channel.
 * NOTE(review): this copy of the file is missing lines (the embedded
 * numbering jumps); many sprintf arguments matching the format string
 * are not visible below.
 */
722 static int get_stat(int pid
, char * buffer
)
724 struct task_struct
*tsk
= find_task_by_pid(pid
);
725 unsigned long vsize
, eip
, esp
, wchan
;
728 sigset_t sigign
, sigcatch
;
/* Two hex digits per sigset byte, plus the terminating NUL. */
729 char signal_str
[sizeof(sigset_t
)*2+1];
730 char blocked_str
[sizeof(sigset_t
)*2+1];
731 char sigign_str
[sizeof(sigset_t
)*2+1];
732 char sigcatch_str
[sizeof(sigset_t
)*2+1];
/* Map the numeric task state onto a single status letter. */
737 if (tsk
->state
< 0 || tsk
->state
> 5)
740 state
= "RSDZTW"[tsk
->state
];
741 vsize
= eip
= esp
= 0;
742 if (tsk
->mm
&& tsk
->mm
!= &init_mm
) {
743 struct vm_area_struct
*vma
= tsk
->mm
->mmap
;
/* Virtual size is the sum of all vma extents. */
745 vsize
+= vma
->vm_end
- vma
->vm_start
;
752 wchan
= get_wchan(tsk
);
754 collect_sigign_sigcatch(tsk
, &sigign
, &sigcatch
);
755 render_sigset_t(&tsk
->signal
, signal_str
);
756 render_sigset_t(&tsk
->blocked
, blocked_str
);
757 render_sigset_t(&sigign
, sigign_str
);
758 render_sigset_t(&sigcatch
, sigcatch_str
);
761 tty_pgrp
= tsk
->tty
->pgrp
;
765 /* scale priority and nice values from timeslices to -20..20 */
766 /* to make it look like a "normal" unix priority/nice value */
767 priority
= tsk
->counter
;
768 priority
= 20 - (priority
* 10 + DEF_PRIORITY
/ 2) / DEF_PRIORITY
;
769 nice
= tsk
->priority
;
770 nice
= 20 - (nice
* 20 + DEF_PRIORITY
/ 2) / DEF_PRIORITY
;
772 return sprintf(buffer
,"%d (%s) %c %d %d %d %d %d %lu %lu \
773 %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu %lu \
774 %lu %s %s %s %s %lu %lu %lu\n",
781 tsk
->tty
? kdev_t_to_nr(tsk
->tty
->device
) : 0,
788 tsk
->times
.tms_utime
,
789 tsk
->times
.tms_stime
,
790 tsk
->times
.tms_cutime
,
791 tsk
->times
.tms_cstime
,
/* mm-derived fields are guarded: kernel threads have no mm. */
798 tsk
->mm
? tsk
->mm
->rss
: 0, /* you might want to shift this left 3 */
799 tsk
->rlim
? tsk
->rlim
[RLIMIT_RSS
].rlim_cur
: 0,
800 tsk
->mm
? tsk
->mm
->start_code
: 0,
801 tsk
->mm
? tsk
->mm
->end_code
: 0,
802 tsk
->mm
? tsk
->mm
->start_stack
: 0,
/*
 * statm_pte_range / statm_pmd_range / statm_pgd_range — three-level
 * page-table walk that tallies present, shared, and dirty page counts
 * for an address range.  get_statm drives the walk over every vma of a
 * task and formats the /proc/<pid>/statm totals.
 * NOTE(review): this copy of the file is missing lines (the embedded
 * numbering jumps), so the fragments below are not the complete bodies.
 */
814 static inline void statm_pte_range(pmd_t
* pmd
, unsigned long address
, unsigned long size
,
815 int * pages
, int * shared
, int * dirty
, int * total
)
823 printk("statm_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd
));
827 pte
= pte_offset(pmd
, address
);
/* Walk only within this pmd: mask to the offset inside it. */
828 address
&= ~PMD_MASK
;
829 end
= address
+ size
;
835 address
+= PAGE_SIZE
;
840 if (!pte_present(page
))
845 if (MAP_NR(pte_page(page
)) >= max_mapnr
)
/* Page is shared if more than one mapping holds a reference. */
847 if (atomic_read(&mem_map
[MAP_NR(pte_page(page
))].count
) > 1)
849 } while (address
< end
);
852 static inline void statm_pmd_range(pgd_t
* pgd
, unsigned long address
, unsigned long size
,
853 int * pages
, int * shared
, int * dirty
, int * total
)
861 printk("statm_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd
));
865 pmd
= pmd_offset(pgd
, address
);
866 address
&= ~PGDIR_MASK
;
867 end
= address
+ size
;
868 if (end
> PGDIR_SIZE
)
871 statm_pte_range(pmd
, address
, end
- address
, pages
, shared
, dirty
, total
);
/* Advance to the start of the next pmd-covered region. */
872 address
= (address
+ PMD_SIZE
) & PMD_MASK
;
874 } while (address
< end
);
877 static void statm_pgd_range(pgd_t
* pgd
, unsigned long address
, unsigned long end
,
878 int * pages
, int * shared
, int * dirty
, int * total
)
880 while (address
< end
) {
881 statm_pmd_range(pgd
, address
, end
- address
, pages
, shared
, dirty
, total
);
882 address
= (address
+ PGDIR_SIZE
) & PGDIR_MASK
;
887 static int get_statm(int pid
, char * buffer
)
889 struct task_struct
*tsk
= find_task_by_pid(pid
);
890 int size
=0, resident
=0, share
=0, trs
=0, lrs
=0, drs
=0, dt
=0;
894 if (tsk
->mm
&& tsk
->mm
!= &init_mm
) {
895 struct vm_area_struct
* vma
= tsk
->mm
->mmap
;
898 pgd_t
*pgd
= pgd_offset(tsk
->mm
, vma
->vm_start
);
899 int pages
= 0, shared
= 0, dirty
= 0, total
= 0;
901 statm_pgd_range(pgd
, vma
->vm_start
, vma
->vm_end
, &pages
, &shared
, &dirty
, &total
);
/* Attribute this vma's pages to text, stack or library. */
906 if (vma
->vm_flags
& VM_EXECUTABLE
)
907 trs
+= pages
; /* text */
908 else if (vma
->vm_flags
& VM_GROWSDOWN
)
909 drs
+= pages
; /* stack */
910 else if (vma
->vm_end
> 0x60000000)
911 lrs
+= pages
; /* library */
917 return sprintf(buffer
,"%d %d %d %d %d %d %d\n",
918 size
, resident
, share
, trs
, lrs
, drs
, dt
);
/*
 * read_maps — read() handler for /proc/<pid>/maps.  Each vma is one
 * fixed-length logical record of MAPS_LINE_LENGTH bytes; f_pos encodes
 * (vma index << MAPS_LINE_SHIFT) | column, so arbitrary offsets can be
 * served without re-walking the whole list (see the comment below).
 * NOTE(review): this copy of the file is missing lines (the embedded
 * numbering jumps), so the fragments below are not the complete body.
 */
922 * The way we support synthetic files > 4K
923 * - without storing their contents in some buffer and
924 * - without walking through the entire synthetic file until we reach the
925 * position of the requested data
926 * is to cleverly encode the current position in the file's f_pos field.
927 * There is no requirement that a read() call which returns `count' bytes
928 * of data increases f_pos by exactly `count'.
930 * This idea is Linus' one. Bruno implemented it.
934 * For the /proc/<pid>/maps file, we use fixed length records, each containing
937 #define MAPS_LINE_LENGTH 4096
938 #define MAPS_LINE_SHIFT 12
940 * f_pos = (number of the vma in the task->mm->mmap list) * MAPS_LINE_LENGTH
941 * + (index into the line)
943 /* for systems with sizeof(void*) == 4: */
944 #define MAPS_LINE_FORMAT4 "%08lx-%08lx %s %08lx %s %lu"
945 #define MAPS_LINE_MAX4 49 /* sum of 8 1 8 1 4 1 8 1 5 1 10 1 */
947 /* for systems with sizeof(void*) == 8: */
948 #define MAPS_LINE_FORMAT8 "%016lx-%016lx %s %016lx %s %lu"
949 #define MAPS_LINE_MAX8 73 /* sum of 16 1 16 1 4 1 16 1 5 1 10 1 */
951 #define MAPS_LINE_MAX MAPS_LINE_MAX8
954 static ssize_t
read_maps (int pid
, struct file
* file
, char * buf
,
955 size_t count
, loff_t
*ppos
)
957 struct task_struct
*p
;
958 struct vm_area_struct
* map
, * next
;
959 char * destptr
= buf
, * buffer
;
966 * We might sleep getting the page, so get it first.
969 buffer
= (char*)__get_free_page(GFP_KERNEL
);
974 p
= find_task_by_pid(pid
);
978 if (!p
->mm
|| p
->mm
== &init_mm
|| count
== 0)
981 /* Check whether the mmaps could change if we sleep */
982 volatile_task
= (p
!= current
|| p
->mm
->count
> 1);
/* Decode (vma index, column) from the synthetic file position. */
985 lineno
= *ppos
>> MAPS_LINE_SHIFT
;
986 column
= *ppos
& (MAPS_LINE_LENGTH
-1);
988 /* quickly go to line lineno */
989 for (map
= p
->mm
->mmap
, i
= 0; map
&& (i
< lineno
); map
= map
->vm_next
, i
++)
992 for ( ; map
; map
= next
) {
993 /* produce the next line */
995 char str
[5], *cp
= str
;
999 int maxlen
= (sizeof(void*) == 4) ?
1000 MAPS_LINE_MAX4
: MAPS_LINE_MAX8
;
1004 * Get the next vma now (but it won't be used if we sleep).
1006 next
= map
->vm_next
;
1007 flags
= map
->vm_flags
;
/* rwx[sp] permission string, 's' when the mapping may be shared. */
1009 *cp
++ = flags
& VM_READ
? 'r' : '-';
1010 *cp
++ = flags
& VM_WRITE
? 'w' : '-';
1011 *cp
++ = flags
& VM_EXEC
? 'x' : '-';
1012 *cp
++ = flags
& VM_MAYSHARE
? 's' : 'p';
/* File-backed mappings also report device, inode and path. */
1017 if (map
->vm_dentry
!= NULL
) {
1018 dev
= map
->vm_dentry
->d_inode
->i_dev
;
1019 ino
= map
->vm_dentry
->d_inode
->i_ino
;
1020 line
= d_path(map
->vm_dentry
, buffer
, PAGE_SIZE
);
1021 buffer
[PAGE_SIZE
-1] = '\n';
1029 sizeof(void*) == 4 ? MAPS_LINE_FORMAT4
: MAPS_LINE_FORMAT8
,
1030 map
->vm_start
, map
->vm_end
, str
, map
->vm_offset
,
1031 kdevname(dev
), ino
);
1033 if(map
->vm_dentry
) {
1034 for(i
= len
; i
< maxlen
; i
++)
1036 len
= buffer
+ PAGE_SIZE
- line
;
/* Entire line already consumed by a previous read: skip it. */
1039 if (column
>= len
) {
1040 column
= 0; /* continue with next line at column 0 */
1042 continue; /* we haven't slept */
1048 copy_to_user(destptr
, line
+column
, i
); /* may have slept */
1052 if (column
>= len
) {
1053 column
= 0; /* next time: next line at column 0 */
1061 /* By writing to user space, we might have slept.
1062 * Stop the loop, to avoid a race condition.
/* Re-encode the position for the next read() call. */
1069 *ppos
= (lineno
<< MAPS_LINE_SHIFT
) + column
;
1072 retval
= destptr
- buf
;
1075 free_page((unsigned long)buffer
);
/*
 * get_root_array / get_process_array — dispatch tables mapping a /proc
 * inode's type code to the formatter that fills the output page;
 * fill_array picks between them based on whether a pid is involved.
 * The extern declarations below pull in formatters implemented by other
 * subsystems.
 * NOTE(review): this copy of the file is missing lines (the embedded
 * numbering jumps); most `case PROC_*:` labels of the switches are not
 * visible here.
 */
1080 #ifdef CONFIG_MODULES
1081 extern int get_module_list(char *);
1082 extern int get_ksyms_list(char *, char **, off_t
, int);
1084 extern int get_device_list(char *);
1085 extern int get_filesystem_list(char *);
1086 extern int get_filesystem_info( char * );
1087 extern int get_irq_list(char *);
1088 extern int get_dma_list(char *);
1089 extern int get_cpuinfo(char *);
1090 extern int get_pci_list(char*);
1091 extern int get_md_status (char *);
1092 extern int get_rtc_status (char *);
1093 extern int get_locks_status (char *, char **, off_t
, int);
1094 extern int get_swaparea_info (char *);
1096 extern int get_smp_prof_list(char *);
1099 extern int zorro_get_list(char *);
1101 #if defined (CONFIG_AMIGA) || defined (CONFIG_ATARI)
1102 extern int get_hardware_list(char *);
1105 static long get_root_array(char * page
, int type
, char **start
,
1106 off_t offset
, unsigned long length
)
1110 return get_loadavg(page
);
1113 return get_uptime(page
);
1116 return get_meminfo(page
);
1120 return get_pci_list(page
);
1124 return get_cpuinfo(page
);
1127 return get_version(page
);
1129 #ifdef CONFIG_DEBUG_MALLOC
1131 return get_malloc(page
);
1134 #ifdef CONFIG_MODULES
1136 return get_module_list(page
);
/* ksyms and locks take start/offset/length for block-adjusted output. */
1139 return get_ksyms_list(page
, start
, offset
, length
);
1143 return get_kstat(page
);
1146 return get_slabinfo(page
);
1149 return get_device_list(page
);
1151 case PROC_INTERRUPTS
:
1152 return get_irq_list(page
);
1154 case PROC_FILESYSTEMS
:
1155 return get_filesystem_list(page
);
1158 return get_dma_list(page
);
1161 return get_ioport_list(page
);
1162 #ifdef CONFIG_BLK_DEV_MD
1164 return get_md_status(page
);
1168 return get_smp_prof_list(page
);
1171 return get_cmdline(page
);
1174 return get_filesystem_info( page
);
1177 return get_swaparea_info(page
);
1180 return get_rtc_status(page
);
1183 return get_locks_status(page
, start
, offset
, length
);
1186 return zorro_get_list(page
);
1188 #if defined (CONFIG_AMIGA) || defined (CONFIG_ATARI)
1190 return get_hardware_list(page
);
1196 static int get_process_array(char * page
, int pid
, int type
)
1199 case PROC_PID_STATUS
:
1200 return get_status(pid
, page
);
1201 case PROC_PID_ENVIRON
:
1202 return get_env(pid
, page
);
1203 case PROC_PID_CMDLINE
:
1204 return get_arg(pid
, page
);
1206 return get_stat(pid
, page
);
1207 case PROC_PID_STATM
:
1208 return get_statm(pid
, page
);
/* Route per-process entries to get_process_array, the rest to root. */
1214 static inline int fill_array(char * page
, int pid
, int type
, char **start
, off_t offset
, int length
)
1217 return get_process_array(page
, pid
, type
);
1218 return get_root_array(page
, type
, start
, offset
, length
);
/*
 * array_read — generic read() for single-page /proc entries: fill one
 * page via fill_array (or the entry's own get_info hook) and copy the
 * requested window to user space.  arraylong_read handles the larger
 * entries (currently /proc/<pid>/maps), decoding pid and type from the
 * inode number.  The operations tables below plug both into the VFS.
 * NOTE(review): this copy of the file is missing lines (the embedded
 * numbering jumps), so the fragments below are not the complete bodies.
 */
1221 #define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */
1223 static ssize_t
array_read(struct file
* file
, char * buf
,
1224 size_t count
, loff_t
*ppos
)
1226 struct inode
* inode
= file
->f_dentry
->d_inode
;
1231 unsigned int type
, pid
;
1232 struct proc_dir_entry
*dp
;
/* Never produce more than a formatter can safely write into a page. */
1234 if (count
> PROC_BLOCK_SIZE
)
1235 count
= PROC_BLOCK_SIZE
;
1236 if (!(page
= __get_free_page(GFP_KERNEL
)))
1238 type
= inode
->i_ino
;
1242 dp
= (struct proc_dir_entry
*) inode
->u
.generic_ip
;
/* Entries with their own get_info hook format themselves... */
1244 length
= dp
->get_info((char *)page
, &start
, *ppos
,
/* ...everything else goes through the dispatch in fill_array. */
1247 length
= fill_array((char *) page
, pid
, type
,
1248 &start
, *ppos
, count
);
1253 if (start
!= NULL
) {
1254 /* We have had block-adjusting processing! */
1255 copy_to_user(buf
, start
, length
);
1259 /* Static 4kB (or whatever) block capacity */
1260 if (*ppos
>= length
) {
1264 if (count
+ *ppos
> length
)
1265 count
= length
- *ppos
;
1266 end
= count
+ *ppos
;
1267 copy_to_user(buf
, (char *) page
+ *ppos
, count
);
1274 static struct file_operations proc_array_operations
= {
1275 NULL
, /* array_lseek */
1277 NULL
, /* array_write */
1278 NULL
, /* array_readdir */
1279 NULL
, /* array_poll */
1280 NULL
, /* array_ioctl */
1282 NULL
, /* no special open code */
1283 NULL
, /* no special release code */
1284 NULL
/* can't fsync */
1287 struct inode_operations proc_array_inode_operations
= {
1288 &proc_array_operations
, /* default base directory file-ops */
1298 NULL
, /* readlink */
1299 NULL
, /* follow_link */
1300 NULL
, /* readpage */
1301 NULL
, /* writepage */
1303 NULL
, /* truncate */
1304 NULL
/* permission */
1307 static ssize_t
arraylong_read(struct file
* file
, char * buf
,
1308 size_t count
, loff_t
*ppos
)
1310 struct inode
* inode
= file
->f_dentry
->d_inode
;
/* Inode number encodes the pid (high 16 bits) and entry type (low). */
1311 unsigned int pid
= inode
->i_ino
>> 16;
1312 unsigned int type
= inode
->i_ino
& 0x0000ffff;
1316 return read_maps(pid
, file
, buf
, count
, ppos
);
1321 static struct file_operations proc_arraylong_operations
= {
1322 NULL
, /* array_lseek */
1324 NULL
, /* array_write */
1325 NULL
, /* array_readdir */
1326 NULL
, /* array_poll */
1327 NULL
, /* array_ioctl */
1329 NULL
, /* no special open code */
1330 NULL
, /* no special release code */
1331 NULL
/* can't fsync */
1334 struct inode_operations proc_arraylong_inode_operations
= {
1335 &proc_arraylong_operations
, /* default base directory file-ops */
1345 NULL
, /* readlink */
1346 NULL
, /* follow_link */
1347 NULL
, /* readpage */
1348 NULL
, /* writepage */
1350 NULL
, /* truncate */
1351 NULL
/* permission */