/*
 *  linux/fs/proc/array.c
 *
 *  Copyright (C) 1992  by Linus Torvalds
 *  based on ideas by Darren Senn
 *
 *  Fixes:
 *  Michael. K. Johnson: stat,statm extensions.
 *                      <johnsonm@stolaf.edu>
 *
 *  Pauline Middelink :  Made cmdline,envline only break at '\0's, to
 *                       make sure SET_PROCTITLE works. Also removed
 *                       bad '!' which forced address recalculation for
 *                       EVERY character on the current page.
 *                       <middelin@polyware.iaf.nl>
 *
 *  Danny ter Haar    :  added cpuinfo
 *                       <dth@cistron.nl>
 *
 *  Alessandro Rubini :  profile extension.
 *                       <rubini@ipvvis.unipv.it>
 *
 *  Jeff Tranter      :  added BogoMips field to cpuinfo
 *                       <Jeff_Tranter@Mitel.COM>
 *
 *  Bruno Haible      :  remove 4K limit for the maps file
 *                       <haible@ma2s2.mathematik.uni-karlsruhe.de>
 *
 *  Yves Arrouye      :  remove removal of trailing spaces in get_array.
 *                       <Yves.Arrouye@marin.fdn.fr>
 *
 *  Jerome Forissier  :  added per-CPU time information to /proc/stat
 *                       and /proc/<pid>/cpu extension
 *                       <forissier@isia.cma.fr>
 *                       - Incorporation and non-SMP safe operation
 *                         of forissier patch in 2.1.78 by
 *                         Hans Marcus <crowbar@concepts.nl>
 *
 *  aeb@cwi.nl        :  /proc/partitions
 *
 *  Alan Cox          :  security fixes.
 *                       <Alan.Cox@linux.org>
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/tty.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/string.h>
#include <linux/mman.h>
#include <linux/proc_fs.h>
#include <linux/ioport.h>
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
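/*
 * Worked example (assuming the usual FSHIFT == 11, so FIXED_1 == 2048):
 * an avenrun[] value of 3072 represents 1.5 in fixed point.  LOAD_INT(3072)
 * is 1 and LOAD_FRAC(3072) is ((3072 & 2047) * 100) >> 11 == 50, so
 * get_loadavg() below renders it as "1.50".
 */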
#ifdef CONFIG_DEBUG_MALLOC
int get_malloc(char * buffer);
#endif
static ssize_t read_core(struct file * file, char * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos, memsize;
	ssize_t read;
	ssize_t count1;
	char * pnt;
	struct user dump;
#if defined (__i386__) || defined (__mc68000__)
#	define FIRST_MAPPED	PAGE_SIZE	/* we don't have page 0 mapped on x86.. */
#else
#	define FIRST_MAPPED	0
#endif

	memset(&dump, 0, sizeof(struct user));
	dump.magic = CMAGIC;
	dump.u_dsize = max_mapnr;
#ifdef __alpha__
	dump.start_data = PAGE_OFFSET;
#endif

	memsize = (max_mapnr + 1) << PAGE_SHIFT;
	if (p >= memsize)
		return 0;
	if (count > memsize - p)
		count = memsize - p;
	read = 0;

	if (p < sizeof(struct user) && count > 0) {
		count1 = count;
		if (p + count1 > sizeof(struct user))
			count1 = sizeof(struct user)-p;
		pnt = (char *) &dump + p;
		copy_to_user(buf,(void *) pnt, count1);
		buf += count1;
		p += count1;
		count -= count1;
		read += count1;
	}

	if (count > 0 && p < PAGE_SIZE + FIRST_MAPPED) {
		count1 = PAGE_SIZE + FIRST_MAPPED - p;
		if (count1 > count)
			count1 = count;
		clear_user(buf, count1);
		buf += count1;
		p += count1;
		count -= count1;
		read += count1;
	}
	if (count > 0) {
		copy_to_user(buf, (void *) (PAGE_OFFSET+p-PAGE_SIZE), count);
		read += count;
	}
	*ppos += read;
	return read;
}
static struct file_operations proc_kcore_operations = {
	NULL,		/* lseek */
	read_core,
};

struct inode_operations proc_kcore_inode_operations = {
	&proc_kcore_operations,
};
/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t read_profile(struct file *file, char *buf,
			    size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char * pnt;
	unsigned int sample_step = 1 << prof_shift;

	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		put_user(*((char *)(&sample_step)+p),buf);
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(unsigned int);
	copy_to_user(buf,(void *)pnt,count);
	read += count;
	*ppos += read;
	return read;
}
/*
 * Writing to /proc/profile resets the counters
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file * file, const char * buf,
			     size_t count, loff_t *ppos)
{
#ifdef __SMP__
	extern int setup_profiling_timer (unsigned int multiplier);

	if (count==sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif

	memset(prof_buffer, 0, prof_len * sizeof(*prof_buffer));
	return count;
}
static struct file_operations proc_profile_operations = {
	NULL,		/* lseek */
	read_profile,
	write_profile,
};

struct inode_operations proc_profile_inode_operations = {
	&proc_profile_operations,
};
static int get_loadavg(char * buffer)
{
	int a, b, c;

	a = avenrun[0] + (FIXED_1/200);
	b = avenrun[1] + (FIXED_1/200);
	c = avenrun[2] + (FIXED_1/200);
	return sprintf(buffer,"%d.%02d %d.%02d %d.%02d %d/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		nr_running, nr_tasks, last_pid);
}
static int get_kstat(char * buffer)
{
	int i, len;
	unsigned sum = 0;
	extern unsigned long total_forks;
	unsigned long ticks;

	ticks = jiffies * smp_num_cpus;
	for (i = 0 ; i < NR_IRQS ; i++)
		sum += kstat_irqs(i);

#ifdef __SMP__
	len = sprintf(buffer,
		"cpu %u %u %u %lu\n",
		kstat.cpu_user,
		kstat.cpu_nice,
		kstat.cpu_system,
		jiffies*smp_num_cpus - (kstat.cpu_user + kstat.cpu_nice + kstat.cpu_system));
	for (i = 0 ; i < smp_num_cpus; i++)
		len += sprintf(buffer + len, "cpu%d %u %u %u %lu\n",
			i,
			kstat.per_cpu_user[cpu_logical_map(i)],
			kstat.per_cpu_nice[cpu_logical_map(i)],
			kstat.per_cpu_system[cpu_logical_map(i)],
			jiffies - (  kstat.per_cpu_user[cpu_logical_map(i)] \
				   + kstat.per_cpu_nice[cpu_logical_map(i)] \
				   + kstat.per_cpu_system[cpu_logical_map(i)]));
	len += sprintf(buffer + len,
		"disk %u %u %u %u\n"
		"disk_rio %u %u %u %u\n"
		"disk_wio %u %u %u %u\n"
		"disk_rblk %u %u %u %u\n"
		"disk_wblk %u %u %u %u\n"
		"page %u %u\n"
		"swap %u %u\n"
		"intr %u",
#else
	len = sprintf(buffer,
		"cpu %u %u %u %lu\n"
		"disk %u %u %u %u\n"
		"disk_rio %u %u %u %u\n"
		"disk_wio %u %u %u %u\n"
		"disk_rblk %u %u %u %u\n"
		"disk_wblk %u %u %u %u\n"
		"page %u %u\n"
		"swap %u %u\n"
		"intr %u",
		kstat.cpu_user,
		kstat.cpu_nice,
		kstat.cpu_system,
		ticks - (kstat.cpu_user + kstat.cpu_nice + kstat.cpu_system),
#endif
		kstat.dk_drive[0], kstat.dk_drive[1],
		kstat.dk_drive[2], kstat.dk_drive[3],
		kstat.dk_drive_rio[0], kstat.dk_drive_rio[1],
		kstat.dk_drive_rio[2], kstat.dk_drive_rio[3],
		kstat.dk_drive_wio[0], kstat.dk_drive_wio[1],
		kstat.dk_drive_wio[2], kstat.dk_drive_wio[3],
		kstat.dk_drive_rblk[0], kstat.dk_drive_rblk[1],
		kstat.dk_drive_rblk[2], kstat.dk_drive_rblk[3],
		kstat.dk_drive_wblk[0], kstat.dk_drive_wblk[1],
		kstat.dk_drive_wblk[2], kstat.dk_drive_wblk[3],
		kstat.pgpgin,
		kstat.pgpgout,
		kstat.pswpin,
		kstat.pswpout,
		sum);
	for (i = 0 ; i < NR_IRQS ; i++)
		len += sprintf(buffer + len, " %u", kstat_irqs(i));
	len += sprintf(buffer + len,
		"\nctxt %u\n"
		"btime %lu\n"
		"processes %lu\n",
		kstat.context_swtch,
		xtime.tv_sec - jiffies / HZ,
		total_forks);
	return len;
}
static int get_uptime(char * buffer)
{
	unsigned long uptime;
	unsigned long idle;

	uptime = jiffies;
	idle = task[0]->times.tms_utime + task[0]->times.tms_stime;

	/* The formula for the fraction parts really is ((t * 100) / HZ) % 100, but
	   that would overflow about every five days at HZ == 100.
	   Therefore the identity a = (a / b) * b + a % b is used so that it is
	   calculated as (((t / HZ) * 100) + ((t % HZ) * 100) / HZ) % 100.
	   The part in front of the '+' always evaluates as 0 (mod 100). All divisions
	   in the above formulas are truncating. For HZ being a power of 10, the
	   calculations simplify to the version in the #else part (if the printf
	   format is adapted to the same number of digits as zeroes in HZ). */
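	/* Example with HZ == 100: t == 12345 jiffies is 123.45 seconds:
	   t / HZ == 123 and ((t % HZ) * 100) / HZ == 45, without the 32-bit
	   overflow that a direct (t * 100) / HZ would hit after roughly
	   2^32 / 100 jiffies, i.e. about five days. */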
#if HZ!=100
	return sprintf(buffer,"%lu.%02lu %lu.%02lu\n",
		uptime / HZ,
		(((uptime % HZ) * 100) / HZ) % 100,
		idle / HZ,
		(((idle % HZ) * 100) / HZ) % 100);
#else
	return sprintf(buffer,"%lu.%02lu %lu.%02lu\n",
		uptime / HZ,
		uptime % HZ,
		idle / HZ,
		idle % HZ);
#endif
}
static int get_meminfo(char * buffer)
{
	struct sysinfo i;
	int len;

	si_meminfo(&i);
	si_swapinfo(&i);
	len = sprintf(buffer, "        total:    used:    free:  shared: buffers:  cached:\n"
		"Mem:  %8lu %8lu %8lu %8lu %8lu %8lu\n"
		"Swap: %8lu %8lu %8lu\n",
		i.totalram, i.totalram-i.freeram, i.freeram, i.sharedram, i.bufferram, atomic_read(&page_cache_size)*PAGE_SIZE,
		i.totalswap, i.totalswap-i.freeswap, i.freeswap);
	/*
	 * Tagged format, for easy grepping and expansion. The above will go away
	 * eventually, once the tools have been updated.
	 */
	return len + sprintf(buffer+len,
		"MemTotal:  %8lu kB\n"
		"MemFree:   %8lu kB\n"
		"MemShared: %8lu kB\n"
		"Buffers:   %8lu kB\n"
		"Cached:    %8u kB\n"
		"SwapTotal: %8lu kB\n"
		"SwapFree:  %8lu kB\n",
		i.totalram >> 10,
		i.freeram >> 10,
		i.sharedram >> 10,
		i.bufferram >> 10,
		atomic_read(&page_cache_size) << (PAGE_SHIFT - 10),
		i.totalswap >> 10,
		i.freeswap >> 10);
}
static int get_version(char * buffer)
{
	extern char *linux_banner;

	strcpy(buffer, linux_banner);
	return strlen(buffer);
}
static int get_cmdline(char * buffer)
{
	extern char saved_command_line[];

	return sprintf(buffer, "%s\n", saved_command_line);
}
static unsigned long get_phys_addr(struct task_struct * p, unsigned long ptr)
{
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t pte;

	if (!p || !p->mm || ptr >= TASK_SIZE)
		return 0;
	/* Check for NULL pgd .. shouldn't happen! */
	if (!p->mm->pgd) {
		printk("get_phys_addr: pid %d has NULL pgd!\n", p->pid);
		return 0;
	}

	page_dir = pgd_offset(p->mm,ptr);
	if (pgd_none(*page_dir))
		return 0;
	if (pgd_bad(*page_dir)) {
		printk("bad page directory entry %08lx\n", pgd_val(*page_dir));
		pgd_clear(page_dir);
		return 0;
	}
	page_middle = pmd_offset(page_dir,ptr);
	if (pmd_none(*page_middle))
		return 0;
	if (pmd_bad(*page_middle)) {
		printk("bad page middle entry %08lx\n", pmd_val(*page_middle));
		pmd_clear(page_middle);
		return 0;
	}
	pte = *pte_offset(page_middle,ptr);
	if (!pte_present(pte))
		return 0;
	return pte_page(pte) + (ptr & ~PAGE_MASK);
}
static int get_array(struct task_struct *p, unsigned long start, unsigned long end, char * buffer)
{
	unsigned long addr;
	int size = 0, result = 0;
	char c;

	if (start >= end)
		return result;
	for (;;) {
		addr = get_phys_addr(p, start);
		if (!addr)
			return result;
		do {
			c = *(char *) addr;
			if (!c)
				result = size;
			if (size < PAGE_SIZE)
				buffer[size++] = c;
			else
				return result;
			addr++;
			start++;
			if (!c && start >= end)
				return result;
		} while (addr & ~PAGE_MASK);
	}
	return result;
}
static int get_env(int pid, char * buffer)
{
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	read_unlock(&tasklist_lock);	/* FIXME!! This should be done after the last use */

	if (!p || !p->mm)
		return 0;
	return get_array(p, p->mm->env_start, p->mm->env_end, buffer);
}
static int get_arg(int pid, char * buffer)
{
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	read_unlock(&tasklist_lock);	/* FIXME!! This should be done after the last use */
	if (!p || !p->mm)
		return 0;
	return get_array(p, p->mm->arg_start, p->mm->arg_end, buffer);
}
/*
 * These bracket the sleeping functions..
 */
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched	((unsigned long) scheduling_functions_start_here)
#define last_sched	((unsigned long) scheduling_functions_end_here)
static unsigned long get_wchan(struct task_struct *p)
{
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
#if defined(__i386__)
	{
		unsigned long ebp, esp, eip;
		unsigned long stack_page;
		int count = 0;

		stack_page = (unsigned long)p;
		esp = p->tss.esp;
		if (!stack_page || esp < stack_page || esp >= 8188+stack_page)
			return 0;
		/* include/asm-i386/system.h:switch_to() pushes ebp last. */
		ebp = *(unsigned long *) esp;
		do {
			if (ebp < stack_page || ebp >= 8188+stack_page)
				return 0;
			eip = *(unsigned long *) (ebp+4);
			if (eip < first_sched || eip >= last_sched)
				return eip;
			ebp = *(unsigned long *) ebp;
		} while (count++ < 16);
	}
#elif defined(__alpha__)
	/*
	 * This one depends on the frame size of schedule().  Do a
	 * "disass schedule" in gdb to find the frame size.  Also, the
	 * code assumes that sleep_on() follows immediately after
	 * interruptible_sleep_on() and that add_timer() follows
	 * immediately after interruptible_sleep().  Ugly, isn't it?
	 * Maybe adding a wchan field to task_struct would be better,
	 * after all...
	 */
	{
		unsigned long schedule_frame;
		unsigned long pc;

		pc = thread_saved_pc(&p->tss);
		if (pc >= first_sched && pc < last_sched) {
			schedule_frame = ((unsigned long *)p->tss.ksp)[6];
			return ((unsigned long *)schedule_frame)[12];
		}
		return pc;
	}
#elif defined(__mc68000__)
	{
		unsigned long fp, pc;
		unsigned long stack_page;
		int count = 0;

		stack_page = (unsigned long)p;
		fp = ((struct switch_stack *)p->tss.ksp)->a6;
		do {
			if (fp < stack_page+sizeof(struct task_struct) ||
			    fp >= 8184+stack_page)
				return 0;
			pc = ((unsigned long *)fp)[1];
			/* FIXME: This depends on the order of these functions. */
			if (pc < first_sched || pc >= last_sched)
				return pc;
			fp = *(unsigned long *) fp;
		} while (count++ < 16);
	}
#elif defined(__powerpc__)
	{
		unsigned long ip, sp;
		unsigned long stack_page = (unsigned long) p;
		int count = 0;

		sp = p->tss.ksp;
		do {
			sp = *(unsigned long *)sp;
			if (sp < stack_page || sp >= stack_page + 8188)
				return 0;
			if (count > 0) {
				ip = *(unsigned long *)(sp + 4);
				if (ip < first_sched || ip >= last_sched)
					return ip;
			}
		} while (count++ < 16);
	}
#elif defined(__arm__)
	{
		unsigned long fp, lr;
		unsigned long stack_page;
		int count = 0;

		stack_page = 4096 + (unsigned long)p;
		fp = get_css_fp (&p->tss);
		do {
			if (fp < stack_page || fp > 4092+stack_page)
				return 0;
			lr = pc_pointer (((unsigned long *)fp)[-1]);
			if (lr < first_sched || lr > last_sched)
				return lr;
			fp = *(unsigned long *) (fp - 12);
		} while (count ++ < 16);
	}
#elif defined (__sparc__)
	{
		unsigned long pc, fp, bias = 0;
		unsigned long task_base = (unsigned long) p;
		struct reg_window *rw;
		int count = 0;

#ifdef __sparc_v9__
		bias = STACK_BIAS;
#endif
		fp = p->tss.ksp + bias;
		do {
			/* Bogus frame pointer? */
			if (fp < (task_base + sizeof(struct task_struct)) ||
			    fp >= (task_base + (2 * PAGE_SIZE)))
				break;
			rw = (struct reg_window *) fp;
			pc = rw->ins[7];
			if (pc < first_sched || pc >= last_sched)
				return pc;
			fp = rw->ins[6] + bias;
		} while (++count < 16);
	}
#endif
	return 0;
}
#if defined(__i386__)
# define KSTK_EIP(tsk)	(((unsigned long *)(4096+(unsigned long)(tsk)))[1019])
# define KSTK_ESP(tsk)	(((unsigned long *)(4096+(unsigned long)(tsk)))[1022])
#elif defined(__alpha__)
  /*
   * See arch/alpha/kernel/ptrace.c for details.
   */
# define PT_REG(reg)	(PAGE_SIZE - sizeof(struct pt_regs) \
			 + (long)&((struct pt_regs *)0)->reg)
# define KSTK_EIP(tsk) \
	(*(unsigned long *)(PT_REG(pc) + PAGE_SIZE + (unsigned long)(tsk)))
# define KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->tss.usp)
#elif defined(__arm__)
# define KSTK_EIP(tsk)	(((unsigned long *)(4096+(unsigned long)(tsk)))[1022])
# define KSTK_ESP(tsk)	(((unsigned long *)(4096+(unsigned long)(tsk)))[1020])
#elif defined(__mc68000__)
#define KSTK_EIP(tsk)	\
	({ \
		unsigned long eip = 0; \
		if ((tsk)->tss.esp0 > PAGE_SIZE && \
		    MAP_NR((tsk)->tss.esp0) < max_mapnr) \
			eip = ((struct pt_regs *) (tsk)->tss.esp0)->pc; \
		eip; })
#define KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->tss.usp)
#elif defined(__powerpc__)
#define KSTK_EIP(tsk)	((tsk)->tss.regs->nip)
#define KSTK_ESP(tsk)	((tsk)->tss.regs->gpr[1])
#elif defined (__sparc_v9__)
# define KSTK_EIP(tsk)	((tsk)->tss.kregs->tpc)
# define KSTK_ESP(tsk)	((tsk)->tss.kregs->u_regs[UREG_FP])
#elif defined(__sparc__)
# define KSTK_EIP(tsk)	((tsk)->tss.kregs->pc)
# define KSTK_ESP(tsk)	((tsk)->tss.kregs->u_regs[UREG_FP])
#endif
/* Gcc optimizes away "strlen(x)" for constant x */
#define ADDBUF(buffer, string) \
do { memcpy(buffer, string, strlen(string)); \
     buffer += strlen(string); } while (0)
static inline char * task_name(struct task_struct *p, char * buf)
{
	int i;
	char * name;

	ADDBUF(buf, "Name:\t");
	name = p->comm;
	i = sizeof(p->comm);
	do {
		unsigned char c = *name;
		name++;
		i--;
		*buf = c;
		if (!c)
			break;
		if (c == '\\') {
			buf[1] = c;
			buf += 2;
			continue;
		}
		if (c == '\n') {
			buf[0] = '\\';
			buf[1] = 'n';
			buf += 2;
			continue;
		}
		buf++;
	} while (i);
	*buf = '\n';
	return buf+1;
}
/*
 * The task state array is a strange "bitmap" of
 * reasons to sleep. Thus "running" is zero, and
 * you can test for combinations of others with
 * simple bit tests.
 */
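/*
 * For example, a task in state TASK_ZOMBIE (4) makes get_task_state() below
 * shift three times before the state word reaches zero, landing on index 3
 * of this array, i.e. "Z (zombie)".
 */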
static const char *task_state_array[] = {
	"R (running)",		/*  0 */
	"S (sleeping)",		/*  1 */
	"D (disk sleep)",	/*  2 */
	"Z (zombie)",		/*  4 */
	"T (stopped)",		/*  8 */
	"W (paging)"		/* 16 */
};
static inline const char * get_task_state(struct task_struct *tsk)
{
	unsigned int state = tsk->state & (TASK_RUNNING |
					   TASK_INTERRUPTIBLE |
					   TASK_UNINTERRUPTIBLE |
					   TASK_ZOMBIE |
					   TASK_STOPPED |
					   TASK_SWAPPING);
	const char **p = &task_state_array[0];

	while (state) {
		p++;
		state >>= 1;
	}
	return *p;
}
static inline char * task_state(struct task_struct *p, char *buffer)
{
	int g;

	buffer += sprintf(buffer,
		"State:\t%s\n"
		"Pid:\t%d\n"
		"PPid:\t%d\n"
		"Uid:\t%d\t%d\t%d\t%d\n"
		"Gid:\t%d\t%d\t%d\t%d\n"
		"Groups:\t",
		get_task_state(p),
		p->pid, p->p_pptr->pid,
		p->uid, p->euid, p->suid, p->fsuid,
		p->gid, p->egid, p->sgid, p->fsgid);

	for (g = 0; g < p->ngroups; g++)
		buffer += sprintf(buffer, "%d ", p->groups[g]);

	buffer += sprintf(buffer, "\n");
	return buffer;
}
static inline char * task_mem(struct task_struct *p, char *buffer)
{
	struct mm_struct * mm = p->mm;

	if (mm && mm != &init_mm) {
		struct vm_area_struct * vma = mm->mmap;
		unsigned long data = 0, stack = 0;
		unsigned long exec = 0, lib = 0;

		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			unsigned long len = (vma->vm_end - vma->vm_start) >> 10;
			if (!vma->vm_file) {
				data += len;
				if (vma->vm_flags & VM_GROWSDOWN)
					stack += len;
				continue;
			}
			if (vma->vm_flags & VM_WRITE)
				continue;
			if (vma->vm_flags & VM_EXEC) {
				exec += len;
				if (vma->vm_flags & VM_EXECUTABLE)
					continue;
				lib += len;
			}
		}
		buffer += sprintf(buffer,
			"VmSize:\t%8lu kB\n"
			"VmLck:\t%8lu kB\n"
			"VmRSS:\t%8lu kB\n"
			"VmData:\t%8lu kB\n"
			"VmStk:\t%8lu kB\n"
			"VmExe:\t%8lu kB\n"
			"VmLib:\t%8lu kB\n",
			mm->total_vm << (PAGE_SHIFT-10),
			mm->locked_vm << (PAGE_SHIFT-10),
			mm->rss << (PAGE_SHIFT-10),
			data - stack, stack,
			exec - lib, lib);
	}
	return buffer;
}
static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
				    sigset_t *catch)
{
	struct k_sigaction *k;
	int i;

	sigemptyset(ign);
	sigemptyset(catch);

	if (p->sig) {
		k = p->sig->action;
		for (i = 1; i <= _NSIG; ++i, ++k) {
			if (k->sa.sa_handler == SIG_IGN)
				sigaddset(ign, i);
			else if (k->sa.sa_handler != SIG_DFL)
				sigaddset(catch, i);
		}
	}
}
static inline char * task_sig(struct task_struct *p, char *buffer)
{
	sigset_t ign, catch;

	buffer += sprintf(buffer, "SigPnd:\t");
	buffer = render_sigset_t(&p->signal, buffer);
	*buffer++ = '\n';
	buffer += sprintf(buffer, "SigBlk:\t");
	buffer = render_sigset_t(&p->blocked, buffer);
	*buffer++ = '\n';

	collect_sigign_sigcatch(p, &ign, &catch);
	buffer += sprintf(buffer, "SigIgn:\t");
	buffer = render_sigset_t(&ign, buffer);
	*buffer++ = '\n';
	buffer += sprintf(buffer, "SigCgt:\t"); /* Linux 2.0 uses "SigCgt" */
	buffer = render_sigset_t(&catch, buffer);
	*buffer++ = '\n';

	return buffer;
}
extern inline char *task_cap(struct task_struct *p, char *buffer)
{
	return buffer + sprintf(buffer, "CapInh:\t%016x\n"
				"CapPrm:\t%016x\n"
				"CapEff:\t%016x\n",
				cap_t(p->cap_inheritable),
				cap_t(p->cap_permitted),
				cap_t(p->cap_effective));
}
static int get_status(int pid, char * buffer)
{
	char * orig = buffer;
	struct task_struct *tsk;

	read_lock(&tasklist_lock);
	tsk = find_task_by_pid(pid);
	read_unlock(&tasklist_lock);	/* FIXME!! This should be done after the last use */
	if (!tsk)
		return 0;
	buffer = task_name(tsk, buffer);
	buffer = task_state(tsk, buffer);
	buffer = task_mem(tsk, buffer);
	buffer = task_sig(tsk, buffer);
	buffer = task_cap(tsk, buffer);
	return buffer - orig;
}
static int get_stat(int pid, char * buffer)
{
	struct task_struct *tsk;
	unsigned long vsize, eip, esp, wchan;
	long priority, nice;
	int tty_pgrp;
	sigset_t sigign, sigcatch;
	char state;

	read_lock(&tasklist_lock);
	tsk = find_task_by_pid(pid);
	read_unlock(&tasklist_lock);	/* FIXME!! This should be done after the last use */
	if (!tsk)
		return 0;
	state = *get_task_state(tsk);
	vsize = eip = esp = 0;
	if (tsk->mm && tsk->mm != &init_mm) {
		struct vm_area_struct *vma = tsk->mm->mmap;
		while (vma) {
			vsize += vma->vm_end - vma->vm_start;
			vma = vma->vm_next;
		}
		eip = KSTK_EIP(tsk);
		esp = KSTK_ESP(tsk);
	}

	wchan = get_wchan(tsk);

	collect_sigign_sigcatch(tsk, &sigign, &sigcatch);

	if (tsk->tty)
		tty_pgrp = tsk->tty->pgrp;
	else
		tty_pgrp = -1;

	/* scale priority and nice values from timeslices to -20..20 */
	/* to make it look like a "normal" Unix priority/nice value  */
	priority = tsk->counter;
	priority = 20 - (priority * 10 + DEF_PRIORITY / 2) / DEF_PRIORITY;
	nice = tsk->priority;
	nice = 20 - (nice * 20 + DEF_PRIORITY / 2) / DEF_PRIORITY;
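	/* E.g. with HZ == 100 (so DEF_PRIORITY == 20), a task still holding
	   its full default timeslice (counter == 20) shows priority 10, and
	   the default tsk->priority of 20 maps to nice 0. */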
	return sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \
%lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu %lu \
%lu %lu %lu %lu %lu %lu %lu %lu %d %d\n",
		pid,
		tsk->comm,
		state,
		tsk->p_pptr->pid,
		tsk->pgrp,
		tsk->session,
		tsk->tty ? kdev_t_to_nr(tsk->tty->device) : 0,
		tty_pgrp,
		tsk->flags,
		tsk->min_flt,
		tsk->cmin_flt,
		tsk->maj_flt,
		tsk->cmaj_flt,
		tsk->times.tms_utime,
		tsk->times.tms_stime,
		tsk->times.tms_cutime,
		tsk->times.tms_cstime,
		priority,
		nice,
		0UL /* removed */,
		tsk->it_real_value,
		tsk->start_time,
		vsize,
		tsk->mm ? tsk->mm->rss : 0, /* you might want to shift this left 3 */
		tsk->rlim ? tsk->rlim[RLIMIT_RSS].rlim_cur : 0,
		tsk->mm ? tsk->mm->start_code : 0,
		tsk->mm ? tsk->mm->end_code : 0,
		tsk->mm ? tsk->mm->start_stack : 0,
		esp,
		eip,
		/* The signal information here is obsolete.
		 * It must be decimal for Linux 2.0 compatibility.
		 * Use /proc/#/status for real-time signals.
		 */
		tsk->signal .sig[0] & 0x7fffffffUL,
		tsk->blocked.sig[0] & 0x7fffffffUL,
		sigign      .sig[0] & 0x7fffffffUL,
		sigcatch    .sig[0] & 0x7fffffffUL,
		wchan,
		tsk->nswap,
		tsk->cnswap,
		tsk->exit_signal,
		tsk->processor);
}
static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned long size,
	int * pages, int * shared, int * dirty, int * total)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		printk("statm_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t page = *pte;

		address += PAGE_SIZE;
		pte++;
		if (pte_none(page))
			continue;
		++*total;
		if (!pte_present(page))
			continue;
		++*pages;
		if (pte_dirty(page))
			++*dirty;
		if (MAP_NR(pte_page(page)) >= max_mapnr)
			continue;
		if (page_count(mem_map + MAP_NR(pte_page(page))) > 1)
			++*shared;
	} while (address < end);
}
static inline void statm_pmd_range(pgd_t * pgd, unsigned long address, unsigned long size,
	int * pages, int * shared, int * dirty, int * total)
{
	pmd_t * pmd;
	unsigned long end;

	if (pgd_none(*pgd))
		return;
	if (pgd_bad(*pgd)) {
		printk("statm_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
		pgd_clear(pgd);
		return;
	}
	pmd = pmd_offset(pgd, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		statm_pte_range(pmd, address, end - address, pages, shared, dirty, total);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}
static void statm_pgd_range(pgd_t * pgd, unsigned long address, unsigned long end,
	int * pages, int * shared, int * dirty, int * total)
{
	while (address < end) {
		statm_pmd_range(pgd, address, end - address, pages, shared, dirty, total);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		pgd++;
	}
}
static int get_statm(int pid, char * buffer)
{
	struct task_struct *tsk;
	int size=0, resident=0, share=0, trs=0, lrs=0, drs=0, dt=0;

	read_lock(&tasklist_lock);
	tsk = find_task_by_pid(pid);
	read_unlock(&tasklist_lock);	/* FIXME!! This should be done after the last use */
	if (!tsk)
		return 0;
	if (tsk->mm && tsk->mm != &init_mm) {
		struct vm_area_struct * vma = tsk->mm->mmap;

		while (vma) {
			pgd_t *pgd = pgd_offset(tsk->mm, vma->vm_start);
			int pages = 0, shared = 0, dirty = 0, total = 0;

			statm_pgd_range(pgd, vma->vm_start, vma->vm_end, &pages, &shared, &dirty, &total);
			resident += pages;
			share += shared;
			dt += dirty;
			size += total;
			if (vma->vm_flags & VM_EXECUTABLE)
				trs += pages;	/* text */
			else if (vma->vm_flags & VM_GROWSDOWN)
				drs += pages;	/* stack */
			else if (vma->vm_end > 0x60000000)
				lrs += pages;	/* library */
			else
				drs += pages;
			vma = vma->vm_next;
		}
	}
	return sprintf(buffer,"%d %d %d %d %d %d %d\n",
		       size, resident, share, trs, lrs, drs, dt);
}
/*
 * The way we support synthetic files > 4K
 * - without storing their contents in some buffer and
 * - without walking through the entire synthetic file until we reach the
 *   position of the requested data
 * is to cleverly encode the current position in the file's f_pos field.
 * There is no requirement that a read() call which returns `count' bytes
 * of data increases f_pos by exactly `count'.
 *
 * This idea is Linus' one. Bruno implemented it.
 */
/*
 * For the /proc/<pid>/maps file, we use fixed length records, each containing
 * a single line.
 */
#define MAPS_LINE_LENGTH	4096
#define MAPS_LINE_SHIFT		12
/*
 * f_pos = (number of the vma in the task->mm->mmap list) * MAPS_LINE_LENGTH
 *         + (index into the line)
 */
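/*
 * So f_pos == 3 * MAPS_LINE_LENGTH + 17 means: resume in the line for the
 * fourth vma, 17 bytes into it.  A line shorter than MAPS_LINE_LENGTH simply
 * advances f_pos to the next 4K boundary once it has been fully read.
 */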
/* for systems with sizeof(void*) == 4: */
#define MAPS_LINE_FORMAT4	"%08lx-%08lx %s %08lx %s %lu"
#define MAPS_LINE_MAX4		49 /* sum of 8  1  8  1 4 1 8 1 5 1 10 1 */

/* for systems with sizeof(void*) == 8: */
#define MAPS_LINE_FORMAT8	"%016lx-%016lx %s %016lx %s %lu"
#define MAPS_LINE_MAX8		73 /* sum of 16  1  16  1 4 1 16 1 5 1 10 1 */

#define MAPS_LINE_MAX	MAPS_LINE_MAX8
static ssize_t read_maps (int pid, struct file * file, char * buf,
			  size_t count, loff_t *ppos)
{
	struct task_struct *p;
	struct vm_area_struct * map, * next;
	char * destptr = buf, * buffer;
	loff_t lineno;
	ssize_t column, i;
	int volatile_task;
	long retval;

	/*
	 * We might sleep getting the page, so get it first.
	 */
	retval = -ENOMEM;
	buffer = (char*)__get_free_page(GFP_KERNEL);
	if (!buffer)
		goto out;

	retval = -EINVAL;
	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	read_unlock(&tasklist_lock);	/* FIXME!! This should be done after the last use */
	if (!p)
		goto freepage_out;

	if (!p->mm || p->mm == &init_mm || count == 0)
		goto getlen_out;

	/* Check whether the mmaps could change if we sleep */
	volatile_task = (p != current || atomic_read(&p->mm->count) > 1);

	/* decode f_pos */
	lineno = *ppos >> MAPS_LINE_SHIFT;
	column = *ppos & (MAPS_LINE_LENGTH-1);

	/* quickly go to line lineno */
	for (map = p->mm->mmap, i = 0; map && (i < lineno); map = map->vm_next, i++)
		continue;

	for ( ; map ; map = next ) {
		/* produce the next line */
		char *line;
		char str[5], *cp = str;
		int flags;
		kdev_t dev;
		unsigned long ino;
		int maxlen = (sizeof(void*) == 4) ?
			MAPS_LINE_MAX4 : MAPS_LINE_MAX8;
		int len;

		/*
		 * Get the next vma now (but it won't be used if we sleep).
		 */
		next = map->vm_next;
		flags = map->vm_flags;

		*cp++ = flags & VM_READ ? 'r' : '-';
		*cp++ = flags & VM_WRITE ? 'w' : '-';
		*cp++ = flags & VM_EXEC ? 'x' : '-';
		*cp++ = flags & VM_MAYSHARE ? 's' : 'p';
		*cp++ = 0;

		dev = 0;
		ino = 0;
		if (map->vm_file != NULL) {
			dev = map->vm_file->f_dentry->d_inode->i_dev;
			ino = map->vm_file->f_dentry->d_inode->i_ino;
			line = d_path(map->vm_file->f_dentry, buffer, PAGE_SIZE);
			buffer[PAGE_SIZE-1] = '\n';
			line -= maxlen;
			if(line < buffer)
				line = buffer;
		} else
			line = buffer;

		len = sprintf(line,
			      sizeof(void*) == 4 ? MAPS_LINE_FORMAT4 : MAPS_LINE_FORMAT8,
			      map->vm_start, map->vm_end, str, map->vm_offset,
			      kdevname(dev), ino);

		if(map->vm_file) {
			for(i = len; i < maxlen; i++)
				line[i] = ' ';
			len = buffer + PAGE_SIZE - line;
		} else
			line[len++] = '\n';
		if (column >= len) {
			column = 0; /* continue with next line at column 0 */
			lineno++;
			continue; /* we haven't slept */
		}

		i = len-column;
		if (i > count)
			i = count;
		copy_to_user(destptr, line+column, i); /* may have slept */
		destptr += i;
		count -= i;
		column += i;
		if (column >= len) {
			column = 0; /* next time: next line at column 0 */
			lineno++;
		}

		/* done? */
		if (count == 0)
			break;

		/* By writing to user space, we might have slept.
		 * Stop the loop, to avoid a race condition.
		 */
		if (volatile_task)
			break;
	}

	/* encode f_pos */
	*ppos = (lineno << MAPS_LINE_SHIFT) + column;

getlen_out:
	retval = destptr - buf;

freepage_out:
	free_page((unsigned long)buffer);
out:
	return retval;
}
#ifdef __SMP__
static int get_pidcpu(int pid, char * buffer)
{
	struct task_struct * tsk = current ;
	int i, len;

	read_lock(&tasklist_lock);
	if (pid != tsk->pid)
		tsk = find_task_by_pid(pid);
	read_unlock(&tasklist_lock);	/* FIXME!! This should be done after the last use */

	if (tsk == NULL)
		return 0;

	len = sprintf(buffer,
		"cpu %lu %lu\n",
		tsk->times.tms_utime,
		tsk->times.tms_stime);

	for (i = 0 ; i < smp_num_cpus; i++)
		len += sprintf(buffer + len, "cpu%d %lu %lu\n",
			i,
			tsk->per_cpu_utime[cpu_logical_map(i)],
			tsk->per_cpu_stime[cpu_logical_map(i)]);

	return len;
}
#endif
#ifdef CONFIG_MODULES
extern int get_module_list(char *);
extern int get_ksyms_list(char *, char **, off_t, int);
#endif
extern int get_device_list(char *);
extern int get_partition_list(char *);
extern int get_filesystem_list(char *);
extern int get_filesystem_info( char * );
extern int get_irq_list(char *);
extern int get_dma_list(char *);
extern int get_cpuinfo(char *);
extern int get_pci_list(char *);
extern int get_md_status (char *);
extern int get_rtc_status (char *);
extern int get_locks_status (char *, char **, off_t, int);
extern int get_swaparea_info (char *);
extern int get_hardware_list(char *);
extern int get_stram_list(char *);
static long get_root_array(char * page, int type, char **start,
	off_t offset, unsigned long length)
{
	switch (type) {
		case PROC_LOADAVG:
			return get_loadavg(page);

		case PROC_UPTIME:
			return get_uptime(page);

		case PROC_MEMINFO:
			return get_meminfo(page);

#ifdef CONFIG_PCI_OLD_PROC
		case PROC_PCI:
			return get_pci_list(page);
#endif

#ifdef CONFIG_NUBUS
		case PROC_NUBUS:
			return get_nubus_list(page);
#endif

		case PROC_CPUINFO:
			return get_cpuinfo(page);

		case PROC_VERSION:
			return get_version(page);

#ifdef CONFIG_DEBUG_MALLOC
		case PROC_MALLOC:
			return get_malloc(page);
#endif

#ifdef CONFIG_MODULES
		case PROC_MODULES:
			return get_module_list(page);

		case PROC_KSYMS:
			return get_ksyms_list(page, start, offset, length);
#endif

		case PROC_STAT:
			return get_kstat(page);

		case PROC_SLABINFO:
			return get_slabinfo(page);

		case PROC_DEVICES:
			return get_device_list(page);

		case PROC_PARTITIONS:
			return get_partition_list(page);

		case PROC_INTERRUPTS:
			return get_irq_list(page);

		case PROC_FILESYSTEMS:
			return get_filesystem_list(page);

		case PROC_DMA:
			return get_dma_list(page);

		case PROC_IOPORTS:
			return get_ioport_list(page);

		case PROC_MEMORY:
			return get_mem_list(page);
#ifdef CONFIG_BLK_DEV_MD
		case PROC_MD:
			return get_md_status(page);
#endif
		case PROC_CMDLINE:
			return get_cmdline(page);

		case PROC_MTAB:
			return get_filesystem_info( page );

		case PROC_SWAP:
			return get_swaparea_info(page);
#ifdef CONFIG_RTC
		case PROC_RTC:
			return get_rtc_status(page);
#endif
		case PROC_LOCKS:
			return get_locks_status(page, start, offset, length);
#ifdef CONFIG_PROC_HARDWARE
		case PROC_HARDWARE:
			return get_hardware_list(page);
#endif
#ifdef CONFIG_STRAM_PROC
		case PROC_STRAM:
			return get_stram_list(page);
#endif
	}
	return -EBADF;
}
static int process_unauthorized(int type, int pid)
{
	struct task_struct *p;
	uid_t euid=0;	/* Save the euid keep the lock short */
	int ok = 0;

	read_lock(&tasklist_lock);

	/*
	 *	Grab the lock, find the task, save the uid and
	 *	check it has an mm still (ie its not dead)
	 */

	p = find_task_by_pid(pid);
	if (p) {
		euid=p->euid;
		ok = p->dumpable;
		if(!cap_issubset(p->cap_permitted, current->cap_permitted))
			ok=0;
		if(!p->mm)	/* Scooby scooby doo where are you ? */
			p=NULL;
	}

	read_unlock(&tasklist_lock);

	if (!p)
		return 1;

	switch(type)
	{
		case PROC_PID_STATUS:
		case PROC_PID_STATM:
		case PROC_PID_STAT:
		case PROC_PID_MAPS:
		case PROC_PID_CMDLINE:
		case PROC_PID_CPU:
			return 0;
	}
	if(capable(CAP_DAC_OVERRIDE) || (current->fsuid == euid && ok))
		return 0;
	return 1;
}
static int get_process_array(char * page, int pid, int type)
{
	switch (type) {
		case PROC_PID_STATUS:
			return get_status(pid, page);
		case PROC_PID_ENVIRON:
			return get_env(pid, page);
		case PROC_PID_CMDLINE:
			return get_arg(pid, page);
		case PROC_PID_STAT:
			return get_stat(pid, page);
		case PROC_PID_STATM:
			return get_statm(pid, page);
#ifdef __SMP__
		case PROC_PID_CPU:
			return get_pidcpu(pid, page);
#endif
	}
	return -EBADF;
}
static inline int fill_array(char * page, int pid, int type, char **start, off_t offset, int length)
{
	if (pid)
		return get_process_array(page, pid, type);
	return get_root_array(page, type, start, offset, length);
}

#define PROC_BLOCK_SIZE	(3*1024)	/* 4K page size but our output routines use some slack for overruns */
static ssize_t array_read(struct file * file, char * buf,
			  size_t count, loff_t *ppos)
{
	struct inode * inode = file->f_dentry->d_inode;
	unsigned long page;
	char *start;
	ssize_t length;
	ssize_t end;
	unsigned int type, pid;
	struct proc_dir_entry *dp;

	if (count > PROC_BLOCK_SIZE)
		count = PROC_BLOCK_SIZE;
	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;
	type = inode->i_ino;
	pid = type >> 16;
	type &= 0x0000ffff;
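	/* e.g. /proc/42/stat has i_ino == (42 << 16) | PROC_PID_STAT,
	   so pid ends up as 42 and type as PROC_PID_STAT here. */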
	start = NULL;
	dp = (struct proc_dir_entry *) inode->u.generic_ip;

	if (pid && process_unauthorized(type, pid))
	{
		free_page(page);
		return -EIO;
	}

	if (dp->get_info)
		length = dp->get_info((char *)page, &start, *ppos,
				      count, 0);
	else
		length = fill_array((char *) page, pid, type,
				    &start, *ppos, count);
	if (length < 0) {
		free_page(page);
		return length;
	}
	if (start != NULL) {
		/* We have had block-adjusting processing! */
		copy_to_user(buf, start, length);
		*ppos += length;
		count = length;
	} else {
		/* Static 4kB (or whatever) block capacity */
		if (*ppos >= length) {
			free_page(page);
			return 0;
		}
		if (count + *ppos > length)
			count = length - *ppos;
		end = count + *ppos;
		copy_to_user(buf, (char *) page + *ppos, count);
		*ppos = end;
	}
	free_page(page);
	return count;
}
static struct file_operations proc_array_operations = {
	NULL,		/* array_lseek */
	array_read,
	NULL,		/* array_write */
	NULL,		/* array_readdir */
	NULL,		/* array_poll */
	NULL,		/* array_ioctl */
	NULL,		/* mmap */
	NULL,		/* no special open code */
	NULL,		/* flush */
	NULL,		/* no special release code */
	NULL		/* can't fsync */
};
struct inode_operations proc_array_inode_operations = {
	&proc_array_operations,	/* default base directory file-ops */
	NULL,			/* create */
	NULL,			/* lookup */
	NULL,			/* link */
	NULL,			/* unlink */
	NULL,			/* symlink */
	NULL,			/* mkdir */
	NULL,			/* rmdir */
	NULL,			/* mknod */
	NULL,			/* rename */
	NULL,			/* readlink */
	NULL,			/* follow_link */
	NULL,			/* get_block */
	NULL,			/* readpage */
	NULL,			/* writepage */
	NULL,			/* flushpage */
	NULL,			/* truncate */
	NULL,			/* permission */
	NULL,			/* smap */
	NULL			/* revalidate */
};
static ssize_t arraylong_read(struct file * file, char * buf,
			      size_t count, loff_t *ppos)
{
	struct inode * inode = file->f_dentry->d_inode;
	unsigned int pid = inode->i_ino >> 16;
	unsigned int type = inode->i_ino & 0x0000ffff;

	switch (type) {
		case PROC_PID_MAPS:
			return read_maps(pid, file, buf, count, ppos);
	}
	return -EINVAL;
}
static struct file_operations proc_arraylong_operations = {
	NULL,		/* array_lseek */
	arraylong_read,
	NULL,		/* array_write */
	NULL,		/* array_readdir */
	NULL,		/* array_poll */
	NULL,		/* array_ioctl */
	NULL,		/* mmap */
	NULL,		/* no special open code */
	NULL,		/* flush */
	NULL,		/* no special release code */
	NULL		/* can't fsync */
};
struct inode_operations proc_arraylong_inode_operations = {
	&proc_arraylong_operations,	/* default base directory file-ops */
	NULL,			/* create */
	NULL,			/* lookup */
	NULL,			/* link */
	NULL,			/* unlink */
	NULL,			/* symlink */
	NULL,			/* mkdir */
	NULL,			/* rmdir */
	NULL,			/* mknod */
	NULL,			/* rename */
	NULL,			/* readlink */
	NULL,			/* follow_link */
	NULL,			/* get_block */
	NULL,			/* readpage */
	NULL,			/* writepage */
	NULL,			/* flushpage */
	NULL,			/* truncate */
	NULL,			/* permission */
	NULL,			/* smap */
	NULL			/* revalidate */
};