2.2.0-final
[davej-history.git] / fs / proc / array.c
blobf7f28e0e66b1bf27eab9cb24d9384391a023bd47
1 /*
2 * linux/fs/proc/array.c
4 * Copyright (C) 1992 by Linus Torvalds
5 * based on ideas by Darren Senn
7 * Fixes:
8 * Michael. K. Johnson: stat,statm extensions.
9 * <johnsonm@stolaf.edu>
11 * Pauline Middelink : Made cmdline,envline only break at '\0's, to
12 * make sure SET_PROCTITLE works. Also removed
13 * bad '!' which forced address recalculation for
14 * EVERY character on the current page.
15 * <middelin@polyware.iaf.nl>
17 * Danny ter Haar : added cpuinfo
18 * <dth@cistron.nl>
20 * Alessandro Rubini : profile extension.
21 * <rubini@ipvvis.unipv.it>
23 * Jeff Tranter : added BogoMips field to cpuinfo
24 * <Jeff_Tranter@Mitel.COM>
26 * Bruno Haible : remove 4K limit for the maps file
27 * <haible@ma2s2.mathematik.uni-karlsruhe.de>
29 * Yves Arrouye : remove removal of trailing spaces in get_array.
30 * <Yves.Arrouye@marin.fdn.fr>
32 * Jerome Forissier : added per-CPU time information to /proc/stat
33 * and /proc/<pid>/cpu extension
34 * <forissier@isia.cma.fr>
35 * - Incorporation and non-SMP safe operation
36 * of forissier patch in 2.1.78 by
37 * Hans Marcus <crowbar@concepts.nl>
39 * aeb@cwi.nl : /proc/partitions
42 * Alan Cox : security fixes.
43 * <Alan.Cox@linux.org>
45 * Andi Kleen : Race Fixes.
49 #include <linux/types.h>
50 #include <linux/errno.h>
51 #include <linux/sched.h>
52 #include <linux/kernel.h>
53 #include <linux/kernel_stat.h>
54 #include <linux/tty.h>
55 #include <linux/user.h>
56 #include <linux/a.out.h>
57 #include <linux/string.h>
58 #include <linux/mman.h>
59 #include <linux/proc_fs.h>
60 #include <linux/ioport.h>
61 #include <linux/config.h>
62 #include <linux/mm.h>
63 #include <linux/pagemap.h>
64 #include <linux/swap.h>
65 #include <linux/slab.h>
66 #include <linux/smp.h>
67 #include <linux/signal.h>
69 #include <asm/uaccess.h>
70 #include <asm/pgtable.h>
71 #include <asm/io.h>
73 #define LOAD_INT(x) ((x) >> FSHIFT)
74 #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
76 #ifdef CONFIG_DEBUG_MALLOC
77 int get_malloc(char * buffer);
78 #endif
/*
 * read(2) handler for /proc/kcore in the old a.out core format:
 * a fabricated `struct user` header, a zero-filled gap for the
 * unmapped low page(s), then a straight copy of physical memory
 * starting at PAGE_OFFSET.  `p` indexes this virtual layout.
 * NOTE(review): copy_to_user()/clear_user() return values are
 * ignored here, so a faulting user buffer is not reported as
 * -EFAULT — confirm whether that matches the era's convention.
 */
81 static ssize_t read_core(struct file * file, char * buf,
82 size_t count, loff_t *ppos)
84 unsigned long p = *ppos, memsize;
85 ssize_t read;
86 ssize_t count1;
87 char * pnt;
88 struct user dump;
89 #if defined (__i386__) || defined (__mc68000__)
90 # define FIRST_MAPPED PAGE_SIZE /* we don't have page 0 mapped on x86.. */
91 #else
92 # define FIRST_MAPPED 0
93 #endif
/* Build the fake core-dump header. */
95 memset(&dump, 0, sizeof(struct user));
96 dump.magic = CMAGIC;
97 dump.u_dsize = max_mapnr;
98 #ifdef __alpha__
99 dump.start_data = PAGE_OFFSET;
100 #endif
/* Clamp the request to the end of the pseudo-file. */
102 memsize = (max_mapnr + 1) << PAGE_SHIFT;
103 if (p >= memsize)
104 return 0;
105 if (count > memsize - p)
106 count = memsize - p;
107 read = 0;
/* Part 1: the slice of the request overlapping the fake header. */
109 if (p < sizeof(struct user) && count > 0) {
110 count1 = count;
111 if (p + count1 > sizeof(struct user))
112 count1 = sizeof(struct user)-p;
113 pnt = (char *) &dump + p;
114 copy_to_user(buf,(void *) pnt, count1);
115 buf += count1;
116 p += count1;
117 count -= count1;
118 read += count1;
/* Part 2: zero-fill the gap covering the unmapped low page(s). */
121 if (count > 0 && p < PAGE_SIZE + FIRST_MAPPED) {
122 count1 = PAGE_SIZE + FIRST_MAPPED - p;
123 if (count1 > count)
124 count1 = count;
125 clear_user(buf, count1);
126 buf += count1;
127 p += count1;
128 count -= count1;
129 read += count1;
/* Part 3: the rest comes from the kernel direct mapping. */
131 if (count > 0) {
132 copy_to_user(buf, (void *) (PAGE_OFFSET+p-PAGE_SIZE), count);
133 read += count;
135 *ppos += read;
136 return read;
/* File operations for /proc/kcore.  Old positional-initializer
 * style: default lseek (NULL) and read_core; everything else
 * defaults to NULL. */
139 static struct file_operations proc_kcore_operations = {
140 NULL, /* lseek */
141 read_core,
/* Inode operations for /proc/kcore; only default_file_ops is set. */
144 struct inode_operations proc_kcore_inode_operations = {
145 &proc_kcore_operations,
149 * This function accesses profiling information. The returned data is
150 * binary: the sampling step and the actual contents of the profile
151 * buffer. Use of the program readprofile is recommended in order to
152 * get meaningful info out of these data.
/*
 * Virtual-file layout: one unsigned int holding the sampling step
 * (1 << prof_shift) followed by prof_len profile-buffer entries.
 * NOTE(review): the copy_to_user() result is not checked.
 */
154 static ssize_t read_profile(struct file *file, char *buf,
155 size_t count, loff_t *ppos)
157 unsigned long p = *ppos;
158 ssize_t read;
159 char * pnt;
160 unsigned int sample_step = 1 << prof_shift;
/* EOF / clamp against the end of the virtual file. */
162 if (p >= (prof_len+1)*sizeof(unsigned int))
163 return 0;
164 if (count > (prof_len+1)*sizeof(unsigned int) - p)
165 count = (prof_len+1)*sizeof(unsigned int) - p;
166 read = 0;
/* Byte-wise copy of whatever part of sample_step was requested. */
168 while (p < sizeof(unsigned int) && count > 0) {
169 put_user(*((char *)(&sample_step)+p),buf);
170 buf++; p++; count--; read++;
/* Then bulk-copy the remainder from the profile buffer itself. */
172 pnt = (char *)prof_buffer + p - sizeof(unsigned int);
173 copy_to_user(buf,(void *)pnt,count);
174 read += count;
175 *ppos += read;
176 return read;
180 * Writing to /proc/profile resets the counters
182 * Writing a 'profiling multiplier' value into it also re-sets the profiling
183 * interrupt frequency, on architectures that support this.
185 static ssize_t write_profile(struct file * file, const char * buf,
186 size_t count, loff_t *ppos)
188 #ifdef __SMP__
189 extern int setup_profiling_timer (unsigned int multiplier);
/* An exactly int-sized write is interpreted as a new multiplier. */
191 if (count==sizeof(int)) {
192 unsigned int multiplier;
194 if (copy_from_user(&multiplier, buf, sizeof(int)))
195 return -EFAULT;
197 if (setup_profiling_timer(multiplier))
198 return -EINVAL;
200 #endif
/* Any write, of any size, clears the accumulated samples. */
202 memset(prof_buffer, 0, prof_len * sizeof(*prof_buffer));
203 return count;
/* File operations for /proc/profile: default lseek, read + write. */
206 static struct file_operations proc_profile_operations = {
207 NULL, /* lseek */
208 read_profile,
209 write_profile,
/* Inode operations for /proc/profile; only default_file_ops is set. */
212 struct inode_operations proc_profile_inode_operations = {
213 &proc_profile_operations,
217 static int get_loadavg(char * buffer)
219 int a, b, c;
221 a = avenrun[0] + (FIXED_1/200);
222 b = avenrun[1] + (FIXED_1/200);
223 c = avenrun[2] + (FIXED_1/200);
224 return sprintf(buffer,"%d.%02d %d.%02d %d.%02d %d/%d %d\n",
225 LOAD_INT(a), LOAD_FRAC(a),
226 LOAD_INT(b), LOAD_FRAC(b),
227 LOAD_INT(c), LOAD_FRAC(c),
228 nr_running, nr_tasks, last_pid);
/*
 * Render /proc/stat: cpu time totals (plus per-cpu lines on SMP),
 * disk counters, paging/swap counters, interrupt counts, context
 * switches, boot time and fork count.
 * NOTE(review): the SMP branch recomputes jiffies*smp_num_cpus
 * instead of using the cached `ticks`, so the idle figure can be
 * inconsistent if jiffies advances mid-function — verify intent.
 */
231 static int get_kstat(char * buffer)
233 int i, len;
234 unsigned sum = 0;
235 extern unsigned long total_forks;
236 unsigned long ticks;
238 ticks = jiffies * smp_num_cpus;
/* Total interrupts across all IRQ lines, for the "intr" field. */
239 for (i = 0 ; i < NR_IRQS ; i++)
240 sum += kstat_irqs(i);
242 #ifdef __SMP__
243 len = sprintf(buffer,
244 "cpu %u %u %u %lu\n",
245 kstat.cpu_user,
246 kstat.cpu_nice,
247 kstat.cpu_system,
248 jiffies*smp_num_cpus - (kstat.cpu_user + kstat.cpu_nice + kstat.cpu_system));
/* One "cpuN" line per logical CPU; idle = jiffies minus its sum. */
249 for (i = 0 ; i < smp_num_cpus; i++)
250 len += sprintf(buffer + len, "cpu%d %u %u %u %lu\n",
252 kstat.per_cpu_user[cpu_logical_map(i)],
253 kstat.per_cpu_nice[cpu_logical_map(i)],
254 kstat.per_cpu_system[cpu_logical_map(i)],
255 jiffies - ( kstat.per_cpu_user[cpu_logical_map(i)] \
256 + kstat.per_cpu_nice[cpu_logical_map(i)] \
257 + kstat.per_cpu_system[cpu_logical_map(i)]));
258 len += sprintf(buffer + len,
259 "disk %u %u %u %u\n"
260 "disk_rio %u %u %u %u\n"
261 "disk_wio %u %u %u %u\n"
262 "disk_rblk %u %u %u %u\n"
263 "disk_wblk %u %u %u %u\n"
264 "page %u %u\n"
265 "swap %u %u\n"
266 "intr %u",
267 #else
268 len = sprintf(buffer,
269 "cpu %u %u %u %lu\n"
270 "disk %u %u %u %u\n"
271 "disk_rio %u %u %u %u\n"
272 "disk_wio %u %u %u %u\n"
273 "disk_rblk %u %u %u %u\n"
274 "disk_wblk %u %u %u %u\n"
275 "page %u %u\n"
276 "swap %u %u\n"
277 "intr %u",
278 kstat.cpu_user,
279 kstat.cpu_nice,
280 kstat.cpu_system,
281 ticks - (kstat.cpu_user + kstat.cpu_nice + kstat.cpu_system),
282 #endif
283 kstat.dk_drive[0], kstat.dk_drive[1],
284 kstat.dk_drive[2], kstat.dk_drive[3],
285 kstat.dk_drive_rio[0], kstat.dk_drive_rio[1],
286 kstat.dk_drive_rio[2], kstat.dk_drive_rio[3],
287 kstat.dk_drive_wio[0], kstat.dk_drive_wio[1],
288 kstat.dk_drive_wio[2], kstat.dk_drive_wio[3],
289 kstat.dk_drive_rblk[0], kstat.dk_drive_rblk[1],
290 kstat.dk_drive_rblk[2], kstat.dk_drive_rblk[3],
291 kstat.dk_drive_wblk[0], kstat.dk_drive_wblk[1],
292 kstat.dk_drive_wblk[2], kstat.dk_drive_wblk[3],
293 kstat.pgpgin,
294 kstat.pgpgout,
295 kstat.pswpin,
296 kstat.pswpout,
297 sum);
/* Append the per-IRQ counts after the "intr <total>" field. */
298 for (i = 0 ; i < NR_IRQS ; i++)
299 len += sprintf(buffer + len, " %u", kstat_irqs(i));
/* Trailing fields: context switches, boot time (secs), forks. */
300 len += sprintf(buffer + len,
301 "\nctxt %u\n"
302 "btime %lu\n"
303 "processes %lu\n",
304 kstat.context_swtch,
305 xtime.tv_sec - jiffies / HZ,
306 total_forks);
307 return len;
/*
 * Render /proc/uptime: seconds since boot and seconds the idle
 * task (task[0]) has accumulated, each as "sec.centisec".
 */
311 static int get_uptime(char * buffer)
313 unsigned long uptime;
314 unsigned long idle;
316 uptime = jiffies;
/* Idle time = user+system ticks charged to the idle task. */
317 idle = task[0]->times.tms_utime + task[0]->times.tms_stime;
319 /* The formula for the fraction parts really is ((t * 100) / HZ) % 100, but
320 that would overflow about every five days at HZ == 100.
321 Therefore the identity a = (a / b) * b + a % b is used so that it is
322 calculated as (((t / HZ) * 100) + ((t % HZ) * 100) / HZ) % 100.
323 The part in front of the '+' always evaluates as 0 (mod 100). All divisions
324 in the above formulas are truncating. For HZ being a power of 10, the
325 calculations simplify to the version in the #else part (if the printf
326 format is adapted to the same number of digits as zeroes in HZ.
328 #if HZ!=100
329 return sprintf(buffer,"%lu.%02lu %lu.%02lu\n",
330 uptime / HZ,
331 (((uptime % HZ) * 100) / HZ) % 100,
332 idle / HZ,
333 (((idle % HZ) * 100) / HZ) % 100);
334 #else
/* HZ == 100: the remainder is already in centiseconds. */
335 return sprintf(buffer,"%lu.%02lu %lu.%02lu\n",
336 uptime / HZ,
337 uptime % HZ,
338 idle / HZ,
339 idle % HZ);
340 #endif
/*
 * Render /proc/meminfo: the legacy byte-valued table followed by
 * the tagged "Key: value kB" lines.  The ">> 10" conversions
 * assume si_meminfo() reports bytes.
 */
343 static int get_meminfo(char * buffer)
345 struct sysinfo i;
346 int len;
348 si_meminfo(&i);
349 si_swapinfo(&i);
/* Legacy table, values in bytes; cached = page cache size. */
350 len = sprintf(buffer, " total: used: free: shared: buffers: cached:\n"
351 "Mem: %8lu %8lu %8lu %8lu %8lu %8lu\n"
352 "Swap: %8lu %8lu %8lu\n",
353 i.totalram, i.totalram-i.freeram, i.freeram, i.sharedram, i.bufferram, page_cache_size*PAGE_SIZE,
354 i.totalswap, i.totalswap-i.freeswap, i.freeswap);
356 * Tagged format, for easy grepping and expansion. The above will go away
357 * eventually, once the tools have been updated.
359 return len + sprintf(buffer+len,
360 "MemTotal: %8lu kB\n"
361 "MemFree: %8lu kB\n"
362 "MemShared: %8lu kB\n"
363 "Buffers: %8lu kB\n"
364 "Cached: %8lu kB\n"
365 "SwapTotal: %8lu kB\n"
366 "SwapFree: %8lu kB\n",
367 i.totalram >> 10,
368 i.freeram >> 10,
369 i.sharedram >> 10,
370 i.bufferram >> 10,
371 page_cache_size << (PAGE_SHIFT - 10),
372 i.totalswap >> 10,
373 i.freeswap >> 10);
/*
 * Copy the kernel banner (linux_banner) into `buffer` and return
 * its length.
 */
static int get_version(char * buffer)
{
	extern char *linux_banner;

	return sprintf(buffer, "%s", linux_banner);
}
/*
 * Emit the boot command line followed by a newline; returns the
 * number of bytes written to `buffer`.
 */
static int get_cmdline(char * buffer)
{
	extern char saved_command_line[];
	char *out = buffer;

	out += sprintf(out, "%s\n", saved_command_line);
	return out - buffer;
}
392 * Caller must release_mm the mm_struct later.
393 * You don't get any access to init_mm.
/*
 * Look up `pid`, take a reference on its mm (never init_mm), then
 * acquire its mmap_sem.  The mm reference is taken while holding
 * tasklist_lock so the task cannot free it underneath us; the
 * semaphore is taken only after the lock is dropped (down() can
 * sleep).  Returns NULL if the task or its mm is unavailable.
 */
395 static struct mm_struct *get_mm_and_lock(int pid)
397 struct mm_struct *mm = NULL;
398 struct task_struct *tsk;
400 read_lock(&tasklist_lock);
401 tsk = find_task_by_pid(pid);
402 if (tsk && tsk->mm && tsk->mm != &init_mm)
403 mmget(mm = tsk->mm);
404 read_unlock(&tasklist_lock);
405 if (mm != NULL)
406 down(&mm->mmap_sem);
407 return mm;
/* Counterpart of get_mm_and_lock(): drop mmap_sem, then the mm
 * reference (mm must be non-NULL). */
410 static void release_mm(struct mm_struct *mm)
412 up(&mm->mmap_sem);
413 mmput(mm);
/*
 * Translate user virtual address `ptr` in `mm` to the kernel
 * virtual address of its backing page by walking pgd -> pmd -> pte.
 * Returns 0 for any failure (out of range, missing or bad table
 * entries, page not present).  Bad entries are logged and cleared.
 */
416 static unsigned long get_phys_addr(struct mm_struct *mm, unsigned long ptr)
418 pgd_t *page_dir;
419 pmd_t *page_middle;
420 pte_t pte;
422 if (ptr >= TASK_SIZE)
423 return 0;
424 /* Check for NULL pgd .. shouldn't happen! */
425 if (!mm->pgd) {
426 printk(KERN_DEBUG "missing pgd for mm %p\n", mm);
427 return 0;
430 page_dir = pgd_offset(mm,ptr);
431 if (pgd_none(*page_dir))
432 return 0;
433 if (pgd_bad(*page_dir)) {
434 printk("bad page directory entry %08lx\n", pgd_val(*page_dir));
435 pgd_clear(page_dir);
436 return 0;
438 page_middle = pmd_offset(page_dir,ptr);
439 if (pmd_none(*page_middle))
440 return 0;
441 if (pmd_bad(*page_middle)) {
442 printk("bad page middle entry %08lx\n", pmd_val(*page_middle));
443 pmd_clear(page_middle);
444 return 0;
446 pte = *pte_offset(page_middle,ptr);
447 if (!pte_present(pte))
448 return 0;
/* Page address plus the offset within the page. */
449 return pte_page(pte) + (ptr & ~PAGE_MASK);
/*
 * Copy the NUL-separated strings in [start,end) of `mm` (used for
 * cmdline/environ) into `buffer`, at most PAGE_SIZE bytes.
 * `result` only advances when a NUL is seen, so a trailing string
 * without a terminator is not counted — this is what makes
 * SET_PROCTITLE-style truncated areas render correctly.  The
 * inner loop re-translates the address at each page boundary.
 */
452 static int get_array(struct mm_struct *mm, unsigned long start, unsigned long end, char * buffer)
454 unsigned long addr;
455 int size = 0, result = 0;
456 char c;
458 if (start >= end)
459 return result;
460 for (;;) {
461 addr = get_phys_addr(mm, start);
462 if (!addr)
463 return result;
464 do {
465 c = *(char *) addr;
466 if (!c)
467 result = size;
468 if (size < PAGE_SIZE)
469 buffer[size++] = c;
470 else
471 return result;
472 addr++;
473 start++;
474 if (!c && start >= end)
475 return result;
476 } while (addr & ~PAGE_MASK);
478 return result;
481 static int get_env(int pid, char * buffer)
483 struct mm_struct *mm;
484 int res = 0;
486 mm = get_mm_and_lock(pid);
487 if (mm) {
488 res = get_array(mm, mm->env_start, mm->env_end, buffer);
489 release_mm(mm);
491 return res;
494 static int get_arg(int pid, char * buffer)
496 struct mm_struct *mm;
497 int res = 0;
499 mm = get_mm_and_lock(pid);
500 if (mm) {
501 res = get_array(mm, mm->arg_start, mm->arg_end, buffer);
502 release_mm(mm);
504 return res;
508 * These bracket the sleeping functions..
510 extern void scheduling_functions_start_here(void);
511 extern void scheduling_functions_end_here(void);
512 #define first_sched ((unsigned long) scheduling_functions_start_here)
513 #define last_sched ((unsigned long) scheduling_functions_end_here)
/*
 * Best-effort guess at the kernel text address a sleeping task is
 * blocked in ("wchan"): walk the task's saved stack frames until a
 * saved return address falls outside the scheduler text range
 * [first_sched, last_sched).  Returns 0 for the current or running
 * task, or when the frame walk fails / looks bogus.  Each #if arm
 * encodes one architecture's frame layout — do not reorder.
 */
515 static unsigned long get_wchan(struct task_struct *p)
517 if (!p || p == current || p->state == TASK_RUNNING)
518 return 0;
519 #if defined(__i386__)
521 unsigned long ebp, esp, eip;
522 unsigned long stack_page;
523 int count = 0;
525 stack_page = (unsigned long)p;
526 esp = p->tss.esp;
/* Sanity check: saved esp must lie within the 2-page task stack. */
527 if (!stack_page || esp < stack_page || esp >= 8188+stack_page)
528 return 0;
529 /* include/asm-i386/system.h:switch_to() pushes ebp last. */
530 ebp = *(unsigned long *) esp;
531 do {
532 if (ebp < stack_page || ebp >= 8188+stack_page)
533 return 0;
534 eip = *(unsigned long *) (ebp+4);
535 if (eip < first_sched || eip >= last_sched)
536 return eip;
537 ebp = *(unsigned long *) ebp;
538 } while (count++ < 16);
540 #elif defined(__alpha__)
542 * This one depends on the frame size of schedule(). Do a
543 * "disass schedule" in gdb to find the frame size. Also, the
544 * code assumes that sleep_on() follows immediately after
545 * interruptible_sleep_on() and that add_timer() follows
546 * immediately after interruptible_sleep(). Ugly, isn't it?
547 * Maybe adding a wchan field to task_struct would be better,
548 * after all...
551 unsigned long schedule_frame;
552 unsigned long pc;
554 pc = thread_saved_pc(&p->tss);
555 if (pc >= first_sched && pc < last_sched) {
556 schedule_frame = ((unsigned long *)p->tss.ksp)[6];
557 return ((unsigned long *)schedule_frame)[12];
559 return pc;
561 #elif defined(__mc68000__)
563 unsigned long fp, pc;
564 unsigned long stack_page;
565 int count = 0;
567 stack_page = (unsigned long)p;
568 fp = ((struct switch_stack *)p->tss.ksp)->a6;
569 do {
570 if (fp < stack_page+sizeof(struct task_struct) ||
571 fp >= 8184+stack_page)
572 return 0;
573 pc = ((unsigned long *)fp)[1];
574 /* FIXME: This depends on the order of these functions. */
575 if (pc < first_sched || pc >= last_sched)
576 return pc;
577 fp = *(unsigned long *) fp;
578 } while (count++ < 16);
580 #elif defined(__powerpc__)
/* PowerPC keeps an explicit wchan in the thread struct. */
581 return (p->tss.wchan);
582 #elif defined (CONFIG_ARM)
584 unsigned long fp, lr;
585 unsigned long stack_page;
586 int count = 0;
588 stack_page = 4096 + (unsigned long)p;
589 fp = get_css_fp (&p->tss);
590 do {
591 if (fp < stack_page || fp > 4092+stack_page)
592 return 0;
593 lr = pc_pointer (((unsigned long *)fp)[-1]);
594 if (lr < first_sched || lr > last_sched)
595 return lr;
596 fp = *(unsigned long *) (fp - 12);
597 } while (count ++ < 16);
599 #elif defined (__sparc__)
601 unsigned long pc, fp, bias = 0;
602 unsigned long task_base = (unsigned long) p;
603 struct reg_window *rw;
604 int count = 0;
606 #ifdef __sparc_v9__
607 bias = STACK_BIAS;
608 #endif
609 fp = p->tss.ksp + bias;
610 do {
611 /* Bogus frame pointer? */
612 if (fp < (task_base + sizeof(struct task_struct)) ||
613 fp >= (task_base + (2 * PAGE_SIZE)))
614 break;
615 rw = (struct reg_window *) fp;
616 pc = rw->ins[7];
617 if (pc < first_sched || pc >= last_sched)
618 return pc;
619 fp = rw->ins[6] + bias;
620 } while (++count < 16);
622 #endif
623 return 0;
/*
 * Per-architecture accessors for a task's saved user-mode program
 * counter (KSTK_EIP) and stack pointer (KSTK_ESP), read from fixed
 * offsets at the top of its kernel stack or from saved register
 * state.  The magic indices encode each arch's pt_regs layout.
 */
626 #if defined(__i386__)
627 # define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1019])
628 # define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1022])
629 #elif defined(__alpha__)
631 * See arch/alpha/kernel/ptrace.c for details.
633 # define PT_REG(reg) (PAGE_SIZE - sizeof(struct pt_regs) \
634 + (long)&((struct pt_regs *)0)->reg)
635 # define KSTK_EIP(tsk) \
636 (*(unsigned long *)(PT_REG(pc) + PAGE_SIZE + (unsigned long)(tsk)))
637 # define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->tss.usp)
638 #elif defined(CONFIG_ARM)
639 # define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1022])
640 # define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1020])
641 #elif defined(__mc68000__)
642 #define KSTK_EIP(tsk) \
643 ({ \
644 unsigned long eip = 0; \
645 if ((tsk)->tss.esp0 > PAGE_SIZE && \
646 MAP_NR((tsk)->tss.esp0) < max_mapnr) \
647 eip = ((struct pt_regs *) (tsk)->tss.esp0)->pc; \
648 eip; })
649 #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->tss.usp)
650 #elif defined(__powerpc__)
651 #define KSTK_EIP(tsk) ((tsk)->tss.regs->nip)
652 #define KSTK_ESP(tsk) ((tsk)->tss.regs->gpr[1])
653 #elif defined (__sparc_v9__)
654 # define KSTK_EIP(tsk) ((tsk)->tss.kregs->tpc)
655 # define KSTK_ESP(tsk) ((tsk)->tss.kregs->u_regs[UREG_FP])
656 #elif defined(__sparc__)
657 # define KSTK_EIP(tsk) ((tsk)->tss.kregs->pc)
658 # define KSTK_ESP(tsk) ((tsk)->tss.kregs->u_regs[UREG_FP])
659 #endif
661 /* Gcc optimizes away "strlen(x)" for constant x */
/* Append `string` to `buffer` (no NUL) and advance the pointer.
 * `string` is evaluated twice — only safe for literals. */
662 #define ADDBUF(buffer, string) \
663 do { memcpy(buffer, string, strlen(string)); \
664 buffer += strlen(string); } while (0)
/*
 * Write "Name:\t<comm>\n" into `buf`, escaping '\' as "\\" and
 * newline as "\n" so the status file stays line-oriented.
 * Returns a pointer just past the trailing newline.
 */
666 static inline char * task_name(struct task_struct *p, char * buf)
668 int i;
669 char * name;
671 ADDBUF(buf, "Name:\t");
672 name = p->comm;
673 i = sizeof(p->comm);
674 do {
675 unsigned char c = *name;
676 name++;
677 i--;
/* The character is provisionally stored at buf[0] first; the
 * escape branches then either double it or overwrite it. */
678 *buf = c;
679 if (!c)
680 break;
681 if (c == '\\') {
682 buf[1] = c;
683 buf += 2;
684 continue;
686 if (c == '\n') {
687 buf[0] = '\\';
688 buf[1] = 'n';
689 buf += 2;
690 continue;
692 buf++;
693 } while (i);
694 *buf = '\n';
695 return buf+1;
699 * The task state array is a strange "bitmap" of
700 * reasons to sleep. Thus "running" is zero, and
701 * you can test for combinations of others with
702 * simple bit tests.
/* Index i corresponds to state bit (1 << (i-1)); index 0 = running. */
704 static const char *task_state_array[] = {
705 "R (running)", /* 0 */
706 "S (sleeping)", /* 1 */
707 "D (disk sleep)", /* 2 */
708 "Z (zombie)", /* 4 */
709 "T (stopped)", /* 8 */
710 "W (paging)" /* 16 */
/*
 * Map tsk->state to its human-readable entry in task_state_array
 * by finding the lowest set state bit: each right-shift moves one
 * entry forward, so state 0 yields "R (running)".
 */
713 static inline const char * get_task_state(struct task_struct *tsk)
715 unsigned int state = tsk->state & (TASK_RUNNING |
716 TASK_INTERRUPTIBLE |
717 TASK_UNINTERRUPTIBLE |
718 TASK_ZOMBIE |
719 TASK_STOPPED |
720 TASK_SWAPPING);
721 const char **p = &task_state_array[0];
723 while (state) {
724 p++;
725 state >>= 1;
727 return *p;
/*
 * Append the State/Pid/PPid/Uid/Gid/Groups section of
 * /proc/<pid>/status.  Returns the advanced buffer pointer.
 */
730 static inline char * task_state(struct task_struct *p, char *buffer)
732 int g;
734 buffer += sprintf(buffer,
735 "State:\t%s\n"
736 "Pid:\t%d\n"
737 "PPid:\t%d\n"
738 "Uid:\t%d\t%d\t%d\t%d\n"
739 "Gid:\t%d\t%d\t%d\t%d\n"
740 "Groups:\t",
741 get_task_state(p),
742 p->pid, p->p_pptr->pid,
743 p->uid, p->euid, p->suid, p->fsuid,
744 p->gid, p->egid, p->sgid, p->fsgid);
/* Supplementary groups, space-separated on the Groups: line. */
746 for (g = 0; g < p->ngroups; g++)
747 buffer += sprintf(buffer, "%d ", p->groups[g]);
749 buffer += sprintf(buffer, "\n");
750 return buffer;
/*
 * Append the Vm* section of /proc/<pid>/status.  Walks the vma
 * list (under mmap_sem) classifying sizes in kB:
 *  - anonymous vmas count as data; VM_GROWSDOWN ones also as stack
 *  - read-only VM_EXEC file vmas count as exec; non-VM_EXECUTABLE
 *    ones (shared libraries) also as lib.
 * Emits nothing for kernel threads (no mm) and skips the vma walk
 * entirely for init_mm.
 */
753 static inline char * task_mem(struct task_struct *p, char *buffer)
755 struct mm_struct * mm = p->mm;
757 if (!mm)
758 return buffer;
759 if (mm != &init_mm) {
760 struct vm_area_struct * vma;
761 unsigned long data = 0, stack = 0;
762 unsigned long exec = 0, lib = 0;
764 down(&mm->mmap_sem);
765 for (vma = mm->mmap; vma; vma = vma->vm_next) {
766 unsigned long len = (vma->vm_end - vma->vm_start) >> 10;
767 if (!vma->vm_file) {
768 data += len;
769 if (vma->vm_flags & VM_GROWSDOWN)
770 stack += len;
771 continue;
/* Writable file mappings are not counted in any bucket. */
773 if (vma->vm_flags & VM_WRITE)
774 continue;
775 if (vma->vm_flags & VM_EXEC) {
776 exec += len;
777 if (vma->vm_flags & VM_EXECUTABLE)
778 continue;
779 lib += len;
782 up(&mm->mmap_sem);
783 buffer += sprintf(buffer,
784 "VmSize:\t%8lu kB\n"
785 "VmLck:\t%8lu kB\n"
786 "VmRSS:\t%8lu kB\n"
787 "VmData:\t%8lu kB\n"
788 "VmStk:\t%8lu kB\n"
789 "VmExe:\t%8lu kB\n"
790 "VmLib:\t%8lu kB\n",
791 mm->total_vm << (PAGE_SHIFT-10),
792 mm->locked_vm << (PAGE_SHIFT-10),
793 mm->rss << (PAGE_SHIFT-10),
794 data - stack, stack,
795 exec - lib, lib);
797 return buffer;
/*
 * Fill `ign` and `catch` with the sets of signals the task
 * ignores (SIG_IGN) and catches (handler set, i.e. neither
 * SIG_IGN nor SIG_DFL).  Both sets are empty if p->sig is NULL.
 */
800 static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
801 sigset_t *catch)
803 struct k_sigaction *k;
804 int i;
806 sigemptyset(ign);
807 sigemptyset(catch);
809 if (p->sig) {
810 k = p->sig->action;
811 for (i = 1; i <= _NSIG; ++i, ++k) {
812 if (k->sa.sa_handler == SIG_IGN)
813 sigaddset(ign, i);
814 else if (k->sa.sa_handler != SIG_DFL)
815 sigaddset(catch, i);
/*
 * Append the SigPnd/SigBlk/SigIgn/SigCgt lines of
 * /proc/<pid>/status, rendering each sigset_t via
 * render_sigset_t().  Returns the advanced buffer pointer.
 */
820 static inline char * task_sig(struct task_struct *p, char *buffer)
822 sigset_t ign, catch;
824 buffer += sprintf(buffer, "SigPnd:\t");
825 buffer = render_sigset_t(&p->signal, buffer);
826 *buffer++ = '\n';
827 buffer += sprintf(buffer, "SigBlk:\t");
828 buffer = render_sigset_t(&p->blocked, buffer);
829 *buffer++ = '\n';
831 collect_sigign_sigcatch(p, &ign, &catch);
832 buffer += sprintf(buffer, "SigIgn:\t");
833 buffer = render_sigset_t(&ign, buffer);
834 *buffer++ = '\n';
835 buffer += sprintf(buffer, "SigCgt:\t"); /* Linux 2.0 uses "SigCgt" */
836 buffer = render_sigset_t(&catch, buffer);
837 *buffer++ = '\n';
839 return buffer;
/*
 * Append the CapInh/CapPrm/CapEff capability lines of
 * /proc/<pid>/status as zero-padded hex.
 * NOTE(review): "%016x" prints cap_t() with 16-digit padding —
 * verify the field width matches the capability word size.
 */
842 extern inline char *task_cap(struct task_struct *p, char *buffer)
844 return buffer + sprintf(buffer, "CapInh:\t%016x\n"
845 "CapPrm:\t%016x\n"
846 "CapEff:\t%016x\n",
847 cap_t(p->cap_inheritable),
848 cap_t(p->cap_permitted),
849 cap_t(p->cap_effective));
/*
 * Return a usable task_struct for `pid`: `current` itself, or a
 * snapshot memcpy'd into caller-provided `dst` taken under
 * tasklist_lock (so the fields are consistent even if the task
 * exits after we unlock).  A reference is taken on the snapshot's
 * mm so the pointers copied remain valid; release_task() drops it.
 * Returns NULL if the pid does not exist.
 */
852 static struct task_struct *grab_task(int pid, struct task_struct *dst)
854 struct task_struct *tsk = current;
855 if (pid != tsk->pid) {
856 read_lock(&tasklist_lock);
857 tsk = find_task_by_pid(pid);
858 if (tsk) {
859 memcpy(dst, tsk, sizeof(struct task_struct));
860 tsk = dst;
861 if (tsk->mm && tsk->mm != &init_mm)
862 mmget(tsk->mm);
864 read_unlock(&tasklist_lock);
866 return tsk;
/* Counterpart of grab_task(): drop the mm reference taken for a
 * snapshot (no-op when tsk is `current` or has no real mm). */
869 static void release_task(struct task_struct *tsk)
871 if (tsk != current && tsk->mm && tsk->mm != &init_mm)
872 mmput(tsk->mm);
875 static int get_status(int pid, char * buffer)
877 char * orig = buffer;
878 struct task_struct *tsk, mytask;
880 tsk = grab_task(pid, &mytask);
881 if (!tsk)
882 return 0;
883 buffer = task_name(tsk, buffer);
884 buffer = task_state(tsk, buffer);
885 buffer = task_mem(tsk, buffer);
886 buffer = task_sig(tsk, buffer);
887 buffer = task_cap(tsk, buffer);
888 release_task(tsk);
889 return buffer - orig;
/*
 * Build the single-line /proc/<pid>/stat record.  Gathers state,
 * total vma size (under mmap_sem), saved user eip/esp, wchan,
 * signal masks, tty and scaled priority/nice, then emits them in
 * the fixed Linux 2.0-compatible field order.  Returns the byte
 * count, or 0 if the pid does not exist.
 */
892 static int get_stat(int pid, char * buffer)
894 struct task_struct *tsk, mytask;
895 unsigned long vsize, eip, esp, wchan;
896 long priority, nice;
897 int tty_pgrp;
898 sigset_t sigign, sigcatch;
899 char state;
900 int res;
902 tsk = grab_task(pid, &mytask);
903 if (!tsk)
904 return 0;
905 state = *get_task_state(tsk);
906 vsize = eip = esp = 0;
907 if (tsk->mm && tsk->mm != &init_mm) {
908 struct vm_area_struct *vma;
/* Sum all vma extents for the vsize field. */
910 down(&tsk->mm->mmap_sem);
911 for (vma = tsk->mm->mmap; vma; vma = vma->vm_next) {
912 vsize += vma->vm_end - vma->vm_start;
914 up(&tsk->mm->mmap_sem);
916 eip = KSTK_EIP(tsk);
917 esp = KSTK_ESP(tsk);
920 wchan = get_wchan(tsk);
922 collect_sigign_sigcatch(tsk, &sigign, &sigcatch);
924 if (tsk->tty)
925 tty_pgrp = tsk->tty->pgrp;
926 else
927 tty_pgrp = -1;
929 /* scale priority and nice values from timeslices to -20..20 */
930 /* to make it look like a "normal" Unix priority/nice value */
931 priority = tsk->counter;
932 priority = 20 - (priority * 10 + DEF_PRIORITY / 2) / DEF_PRIORITY;
933 nice = tsk->priority;
934 nice = 20 - (nice * 20 + DEF_PRIORITY / 2) / DEF_PRIORITY;
936 res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \
937 %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu %lu \
938 %lu %lu %lu %lu %lu %lu %lu %lu %d\n",
939 pid,
940 tsk->comm,
941 state,
942 tsk->p_pptr->pid,
943 tsk->pgrp,
944 tsk->session,
945 tsk->tty ? kdev_t_to_nr(tsk->tty->device) : 0,
946 tty_pgrp,
947 tsk->flags,
948 tsk->min_flt,
949 tsk->cmin_flt,
950 tsk->maj_flt,
951 tsk->cmaj_flt,
952 tsk->times.tms_utime,
953 tsk->times.tms_stime,
954 tsk->times.tms_cutime,
955 tsk->times.tms_cstime,
956 priority,
957 nice,
958 0UL /* removed */,
959 tsk->it_real_value,
960 tsk->start_time,
961 vsize,
962 tsk->mm ? tsk->mm->rss : 0, /* you might want to shift this left 3 */
963 tsk->rlim ? tsk->rlim[RLIMIT_RSS].rlim_cur : 0,
964 tsk->mm ? tsk->mm->start_code : 0,
965 tsk->mm ? tsk->mm->end_code : 0,
966 tsk->mm ? tsk->mm->start_stack : 0,
967 esp,
968 eip,
969 /* The signal information here is obsolete.
970 * It must be decimal for Linux 2.0 compatibility.
971 * Use /proc/#/status for real-time signals.
973 tsk->signal .sig[0] & 0x7fffffffUL,
974 tsk->blocked.sig[0] & 0x7fffffffUL,
975 sigign .sig[0] & 0x7fffffffUL,
976 sigcatch .sig[0] & 0x7fffffffUL,
977 wchan,
978 tsk->nswap,
979 tsk->cnswap,
980 tsk->exit_signal);
982 release_task(tsk);
983 return res;
/*
 * Count pages within one pmd entry for /proc/<pid>/statm:
 * total   = all non-empty ptes,
 * pages   = present pages,
 * dirty   = present + dirty,
 * shared  = present pages whose mem_map refcount is > 1.
 * Bad pmds are logged and cleared.
 */
986 static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned long size,
987 int * pages, int * shared, int * dirty, int * total)
989 pte_t * pte;
990 unsigned long end;
992 if (pmd_none(*pmd))
993 return;
994 if (pmd_bad(*pmd)) {
995 printk("statm_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
996 pmd_clear(pmd);
997 return;
999 pte = pte_offset(pmd, address);
/* Clamp the walk to this pmd's span. */
1000 address &= ~PMD_MASK;
1001 end = address + size;
1002 if (end > PMD_SIZE)
1003 end = PMD_SIZE;
1004 do {
1005 pte_t page = *pte;
1007 address += PAGE_SIZE;
1008 pte++;
1009 if (pte_none(page))
1010 continue;
1011 ++*total;
1012 if (!pte_present(page))
1013 continue;
1014 ++*pages;
1015 if (pte_dirty(page))
1016 ++*dirty;
/* Pages outside mem_map (e.g. device mappings) can't be shared-counted. */
1017 if (MAP_NR(pte_page(page)) >= max_mapnr)
1018 continue;
1019 if (atomic_read(&mem_map[MAP_NR(pte_page(page))].count) > 1)
1020 ++*shared;
1021 } while (address < end);
/*
 * Walk the pmds covered by one pgd entry, delegating each to
 * statm_pte_range().  Bad pgds are logged and cleared.
 */
1024 static inline void statm_pmd_range(pgd_t * pgd, unsigned long address, unsigned long size,
1025 int * pages, int * shared, int * dirty, int * total)
1027 pmd_t * pmd;
1028 unsigned long end;
1030 if (pgd_none(*pgd))
1031 return;
1032 if (pgd_bad(*pgd)) {
1033 printk("statm_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
1034 pgd_clear(pgd);
1035 return;
1037 pmd = pmd_offset(pgd, address);
/* Clamp the walk to this pgd's span. */
1038 address &= ~PGDIR_MASK;
1039 end = address + size;
1040 if (end > PGDIR_SIZE)
1041 end = PGDIR_SIZE;
1042 do {
1043 statm_pte_range(pmd, address, end - address, pages, shared, dirty, total);
1044 address = (address + PMD_SIZE) & PMD_MASK;
1045 pmd++;
1046 } while (address < end);
1049 static void statm_pgd_range(pgd_t * pgd, unsigned long address, unsigned long end,
1050 int * pages, int * shared, int * dirty, int * total)
1052 while (address < end) {
1053 statm_pmd_range(pgd, address, end - address, pages, shared, dirty, total);
1054 address = (address + PGDIR_SIZE) & PGDIR_MASK;
1055 pgd++;
/*
 * Render /proc/<pid>/statm: size resident shared text lib data dt,
 * all in pages.  Classification per vma: VM_EXECUTABLE => text,
 * VM_GROWSDOWN => stack (counted in data), vm_end above
 * 0x60000000 => library (an address-based heuristic), else data.
 */
1059 static int get_statm(int pid, char * buffer)
1061 int size=0, resident=0, share=0, trs=0, lrs=0, drs=0, dt=0;
1062 struct mm_struct *mm;
1064 mm = get_mm_and_lock(pid);
1065 if (mm) {
1066 struct vm_area_struct * vma = mm->mmap;
1068 while (vma) {
1069 pgd_t *pgd = pgd_offset(mm, vma->vm_start);
1070 int pages = 0, shared = 0, dirty = 0, total = 0;
1072 statm_pgd_range(pgd, vma->vm_start, vma->vm_end, &pages, &shared, &dirty, &total);
1073 resident += pages;
1074 share += shared;
1075 dt += dirty;
1076 size += total;
1077 if (vma->vm_flags & VM_EXECUTABLE)
1078 trs += pages; /* text */
1079 else if (vma->vm_flags & VM_GROWSDOWN)
1080 drs += pages; /* stack */
1081 else if (vma->vm_end > 0x60000000)
1082 lrs += pages; /* library */
1083 else
1084 drs += pages;
1085 vma = vma->vm_next;
1087 release_mm(mm);
1089 return sprintf(buffer,"%d %d %d %d %d %d %d\n",
1090 size, resident, share, trs, lrs, drs, dt);
1094 * The way we support synthetic files > 4K
1095 * - without storing their contents in some buffer and
1096 * - without walking through the entire synthetic file until we reach the
1097 * position of the requested data
1098 * is to cleverly encode the current position in the file's f_pos field.
1099 * There is no requirement that a read() call which returns `count' bytes
1100 * of data increases f_pos by exactly `count'.
1102 * This idea is Linus' one. Bruno implemented it.
1106 * For the /proc/<pid>/maps file, we use fixed length records, each containing
1107 * a single line.
/* Fixed-length-record encoding for /proc/<pid>/maps: f_pos packs
 * (vma index << MAPS_LINE_SHIFT) | column, so seeking is O(1) per
 * line.  MAX values are the worst-case formatted line lengths. */
1109 #define MAPS_LINE_LENGTH 4096
1110 #define MAPS_LINE_SHIFT 12
1112 * f_pos = (number of the vma in the task->mm->mmap list) * MAPS_LINE_LENGTH
1113 * + (index into the line)
1115 /* for systems with sizeof(void*) == 4: */
1116 #define MAPS_LINE_FORMAT4 "%08lx-%08lx %s %08lx %s %lu"
1117 #define MAPS_LINE_MAX4 49 /* sum of 8 1 8 1 4 1 8 1 5 1 10 1 */
1119 /* for systems with sizeof(void*) == 8: */
1120 #define MAPS_LINE_FORMAT8 "%016lx-%016lx %s %016lx %s %lu"
1121 #define MAPS_LINE_MAX8 73 /* sum of 16 1 16 1 4 1 16 1 5 1 10 1 */
1123 #define MAPS_LINE_MAX MAPS_LINE_MAX8
1125 /* FIXME: this does not do proper mm locking */
/*
 * read(2) for /proc/<pid>/maps.  Formats one line per vma; f_pos
 * encodes (vma index, column) so a partial read resumes at the
 * right place.  Because copy_to_user() may sleep, the loop stops
 * after one copy when the mmap list could change (volatile_task).
 * NOTE(review): the copy_to_user() result is not checked.
 */
1126 static ssize_t read_maps (int pid, struct file * file, char * buf,
1127 size_t count, loff_t *ppos)
1129 struct task_struct *p;
1130 struct vm_area_struct * map, * next;
1131 char * destptr = buf, * buffer;
1132 loff_t lineno;
1133 ssize_t column, i;
1134 int volatile_task;
1135 long retval;
1138 * We might sleep getting the page, so get it first.
1140 retval = -ENOMEM;
1141 buffer = (char*)__get_free_page(GFP_KERNEL);
1142 if (!buffer)
1143 goto out;
1145 retval = -EINVAL;
1146 read_lock(&tasklist_lock);
1147 p = find_task_by_pid(pid);
1148 read_unlock(&tasklist_lock); /* FIXME!! This should be done after the last use */
1149 if (!p)
1150 goto freepage_out;
1152 if (!p->mm || p->mm == &init_mm || count == 0)
1153 goto getlen_out;
1155 /* Check whether the mmaps could change if we sleep */
1156 volatile_task = (p != current || atomic_read(&p->mm->count) > 1);
1158 /* decode f_pos */
1159 lineno = *ppos >> MAPS_LINE_SHIFT;
1160 column = *ppos & (MAPS_LINE_LENGTH-1);
1162 /* quickly go to line lineno */
1163 for (map = p->mm->mmap, i = 0; map && (i < lineno); map = map->vm_next, i++)
1164 continue;
1166 for ( ; map ; map = next ) {
1167 /* produce the next line */
1168 char *line;
1169 char str[5], *cp = str;
1170 int flags;
1171 kdev_t dev;
1172 unsigned long ino;
1173 int maxlen = (sizeof(void*) == 4) ?
1174 MAPS_LINE_MAX4 : MAPS_LINE_MAX8;
1175 int len;
1178 * Get the next vma now (but it won't be used if we sleep).
1180 next = map->vm_next;
1181 flags = map->vm_flags;
/* Build the rwxp/rwxs permission string. */
1183 *cp++ = flags & VM_READ ? 'r' : '-';
1184 *cp++ = flags & VM_WRITE ? 'w' : '-';
1185 *cp++ = flags & VM_EXEC ? 'x' : '-';
1186 *cp++ = flags & VM_MAYSHARE ? 's' : 'p';
1187 *cp++ = 0;
1189 dev = 0;
1190 ino = 0;
/* File-backed mapping: resolve dev/inode and the dentry path.
 * d_path() writes at the end of the page; the fixed-width
 * fields are then printed into the `maxlen` bytes before it. */
1191 if (map->vm_file != NULL) {
1192 dev = map->vm_file->f_dentry->d_inode->i_dev;
1193 ino = map->vm_file->f_dentry->d_inode->i_ino;
1194 line = d_path(map->vm_file->f_dentry, buffer, PAGE_SIZE);
1195 buffer[PAGE_SIZE-1] = '\n';
1196 line -= maxlen;
1197 if(line < buffer)
1198 line = buffer;
1199 } else
1200 line = buffer;
1202 len = sprintf(line,
1203 sizeof(void*) == 4 ? MAPS_LINE_FORMAT4 : MAPS_LINE_FORMAT8,
1204 map->vm_start, map->vm_end, str, map->vm_offset,
1205 kdevname(dev), ino);
/* Pad out to the path, which d_path() already placed. */
1207 if(map->vm_file) {
1208 for(i = len; i < maxlen; i++)
1209 line[i] = ' ';
1210 len = buffer + PAGE_SIZE - line;
1211 } else
1212 line[len++] = '\n';
1213 if (column >= len) {
1214 column = 0; /* continue with next line at column 0 */
1215 lineno++;
1216 continue; /* we haven't slept */
/* Copy as much of this line as the caller asked for. */
1219 i = len-column;
1220 if (i > count)
1221 i = count;
1222 copy_to_user(destptr, line+column, i); /* may have slept */
1223 destptr += i;
1224 count -= i;
1225 column += i;
1226 if (column >= len) {
1227 column = 0; /* next time: next line at column 0 */
1228 lineno++;
1231 /* done? */
1232 if (count == 0)
1233 break;
1235 /* By writing to user space, we might have slept.
1236 * Stop the loop, to avoid a race condition.
1238 if (volatile_task)
1239 break;
1242 /* encode f_pos */
1243 *ppos = (lineno << MAPS_LINE_SHIFT) + column;
1245 getlen_out:
1246 retval = destptr - buf;
1248 freepage_out:
1249 free_page((unsigned long)buffer);
1250 out:
1251 return retval;
#ifdef __SMP__
/*
 * Generate /proc/<pid>/cpu: the task's aggregate user/system tick
 * counts followed by one line per online CPU.
 *
 * Returns the number of characters written into buffer, or 0 when
 * the task no longer exists.
 */
static int get_pidcpu(int pid, char * buffer)
{
	struct task_struct * tsk, mytask;
	char *bp = buffer;
	int cpu;

	tsk = grab_task(pid, &mytask);
	if (!tsk)
		return 0;

	/* Totals first, in the same style as /proc/stat. */
	bp += sprintf(bp, "cpu %lu %lu\n",
		      tsk->times.tms_utime,
		      tsk->times.tms_stime);

	/* Then the per-CPU breakdown, indexed by logical CPU number. */
	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
		int phys = cpu_logical_map(cpu);

		bp += sprintf(bp, "cpu%d %lu %lu\n",
			      cpu,
			      tsk->per_cpu_utime[phys],
			      tsk->per_cpu_stime[phys]);
	}

	release_task(tsk);
	return bp - buffer;
}
#endif
1280 #ifdef CONFIG_MODULES
1281 extern int get_module_list(char *);
1282 extern int get_ksyms_list(char *, char **, off_t, int);
1283 #endif
1284 extern int get_device_list(char *);
1285 extern int get_partition_list(char *);
1286 extern int get_filesystem_list(char *);
1287 extern int get_filesystem_info( char * );
1288 extern int get_irq_list(char *);
1289 extern int get_dma_list(char *);
1290 extern int get_cpuinfo(char *);
1291 extern int get_pci_list(char *);
1292 extern int get_md_status (char *);
1293 extern int get_rtc_status (char *);
1294 extern int get_locks_status (char *, char **, off_t, int);
1295 extern int get_swaparea_info (char *);
1296 extern int get_hardware_list(char *);
1297 extern int get_stram_list(char *);
/*
 * Dispatch a read of a top-level /proc entry (one that does not live
 * under a /proc/<pid> directory) to the generator for its inode type.
 *
 * page:           a free page the generator fills with text
 * type:           the PROC_* inode number identifying the entry
 * start/offset/length: block-adjustment parameters; only the
 *                 generators supporting partial output use them
 *                 (ksyms and locks)
 *
 * Returns the number of bytes produced, or -EBADF for an inode type
 * with no generator compiled in.
 */
static long get_root_array(char * page, int type, char **start,
	off_t offset, unsigned long length)
{
	switch (type) {
		case PROC_LOADAVG:
			return get_loadavg(page);

		case PROC_UPTIME:
			return get_uptime(page);

		case PROC_MEMINFO:
			return get_meminfo(page);

#ifdef CONFIG_PCI_OLD_PROC
		case PROC_PCI:
			return get_pci_list(page);
#endif

#ifdef CONFIG_NUBUS
		case PROC_NUBUS:
			return get_nubus_list(page);
#endif

		case PROC_CPUINFO:
			return get_cpuinfo(page);

		case PROC_VERSION:
			return get_version(page);

#ifdef CONFIG_DEBUG_MALLOC
		case PROC_MALLOC:
			return get_malloc(page);
#endif

#ifdef CONFIG_MODULES
		case PROC_MODULES:
			return get_module_list(page);

		/* ksyms can exceed one page, hence the start/offset args */
		case PROC_KSYMS:
			return get_ksyms_list(page, start, offset, length);
#endif

		case PROC_STAT:
			return get_kstat(page);

		case PROC_SLABINFO:
			return get_slabinfo(page);

		case PROC_DEVICES:
			return get_device_list(page);

		case PROC_PARTITIONS:
			return get_partition_list(page);

		case PROC_INTERRUPTS:
			return get_irq_list(page);

		case PROC_FILESYSTEMS:
			return get_filesystem_list(page);

		case PROC_DMA:
			return get_dma_list(page);

		case PROC_IOPORTS:
			return get_ioport_list(page);
#ifdef CONFIG_BLK_DEV_MD
		case PROC_MD:
			return get_md_status(page);
#endif
		case PROC_CMDLINE:
			return get_cmdline(page);

		case PROC_MTAB:
			return get_filesystem_info( page );

		case PROC_SWAP:
			return get_swaparea_info(page);
#ifdef CONFIG_RTC
		case PROC_RTC:
			return get_rtc_status(page);
#endif
		/* locks can also exceed one page */
		case PROC_LOCKS:
			return get_locks_status(page, start, offset, length);
#ifdef CONFIG_PROC_HARDWARE
		case PROC_HARDWARE:
			return get_hardware_list(page);
#endif
#ifdef CONFIG_STRAM_PROC
		case PROC_STRAM:
			return get_stram_list(page);
#endif
	}
	return -EBADF;
}
1394 static int process_unauthorized(int type, int pid)
1396 struct task_struct *p;
1397 uid_t euid=0; /* Save the euid keep the lock short */
1399 read_lock(&tasklist_lock);
1402 * Grab the lock, find the task, save the uid and
1403 * check it has an mm still (ie its not dead)
1406 p = find_task_by_pid(pid);
1407 if(p)
1409 euid=p->euid;
1410 if(!p->mm) /* Scooby scooby doo where are you ? */
1411 p=NULL;
1414 read_unlock(&tasklist_lock);
1416 if (!p)
1417 return 1;
1419 switch(type)
1421 case PROC_PID_STATUS:
1422 case PROC_PID_STATM:
1423 case PROC_PID_STAT:
1424 case PROC_PID_MAPS:
1425 case PROC_PID_CMDLINE:
1426 case PROC_PID_CPU:
1427 return 0;
1429 if(capable(CAP_DAC_OVERRIDE) || current->fsuid == euid)
1430 return 0;
1431 return 1;
1435 static int get_process_array(char * page, int pid, int type)
1437 switch (type) {
1438 case PROC_PID_STATUS:
1439 return get_status(pid, page);
1440 case PROC_PID_ENVIRON:
1441 return get_env(pid, page);
1442 case PROC_PID_CMDLINE:
1443 return get_arg(pid, page);
1444 case PROC_PID_STAT:
1445 return get_stat(pid, page);
1446 case PROC_PID_STATM:
1447 return get_statm(pid, page);
1448 #ifdef __SMP__
1449 case PROC_PID_CPU:
1450 return get_pidcpu(pid, page);
1451 #endif
1453 return -EBADF;
1457 static inline int fill_array(char * page, int pid, int type, char **start, off_t offset, int length)
1459 if (pid)
1460 return get_process_array(page, pid, type);
1461 return get_root_array(page, type, start, offset, length);
1464 #define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */
/*
 * read() for the single-page /proc array entries.
 *
 * The pid and entry type are packed into the inode number (pid in the
 * high 16 bits).  One page is allocated, filled by either the entry's
 * get_info hook or fill_array(), then copied out to user space.
 *
 * Two output models are supported:
 *  - generators that set *start have already adjusted their output to
 *    the requested (*ppos, count) window;
 *  - otherwise the whole text sits in the page and we slice out the
 *    window ourselves.
 *
 * Returns the number of bytes copied, 0 at EOF, or a negative errno.
 */
static ssize_t array_read(struct file * file, char * buf,
			  size_t count, loff_t *ppos)
{
	struct inode * inode = file->f_dentry->d_inode;
	unsigned long page;
	char *start;
	ssize_t length;
	ssize_t end;
	unsigned int type, pid;
	struct proc_dir_entry *dp;
	int err;

	/* Generators get some slack below the page size for overruns. */
	if (count > PROC_BLOCK_SIZE)
		count = PROC_BLOCK_SIZE;
	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;
	/* Unpack pid and entry type from the inode number. */
	type = inode->i_ino;
	pid = type >> 16;
	type &= 0x0000ffff;
	start = NULL;
	dp = (struct proc_dir_entry *) inode->u.generic_ip;

	/* Permission check for per-process entries. */
	if (pid && process_unauthorized(type, pid)) {
		free_page(page);
		return -EIO;
	}

	if (dp->get_info)
		length = dp->get_info((char *)page, &start, *ppos,
				      count, 0);
	else
		length = fill_array((char *) page, pid, type,
				    &start, *ppos, count);
	if (length < 0) {
		free_page(page);
		return length;
	}
	if (start != NULL) {
		if (length > count)
			length = count;
		/* We have had block-adjusting processing! */
		err = copy_to_user(buf, start, length);
		*ppos += length;
		count = length;
	} else {
		/* Static 4kB (or whatever) block capacity */
		if (*ppos >= length) {
			free_page(page);
			return 0;
		}
		if (count + *ppos > length)
			count = length - *ppos;
		end = count + *ppos;
		err = copy_to_user(buf, (char *) page + *ppos, count);
		*ppos = end;
	}
	free_page(page);
	/* copy_to_user returns the number of bytes NOT copied */
	return err ? -EFAULT : count;
}
/*
 * File operations for the fixed-size /proc array entries: only read()
 * is provided; every other operation falls back to the VFS default.
 * (Pre-2.4 positional initializer; order must match the struct.)
 */
static struct file_operations proc_array_operations = {
	NULL,		/* array_lseek */
	array_read,
	NULL,		/* array_write */
	NULL,		/* array_readdir */
	NULL,		/* array_poll */
	NULL,		/* array_ioctl */
	NULL,		/* mmap */
	NULL,		/* no special open code */
	NULL,		/* flush */
	NULL,		/* no special release code */
	NULL		/* can't fsync */
};
/*
 * Inode operations for the fixed-size /proc array entries: only the
 * default file operations are supplied; all inode-level methods use
 * the VFS defaults.
 */
struct inode_operations proc_array_inode_operations = {
	&proc_array_operations,	/* default base directory file-ops */
	NULL,			/* create */
	NULL,			/* lookup */
	NULL,			/* link */
	NULL,			/* unlink */
	NULL,			/* symlink */
	NULL,			/* mkdir */
	NULL,			/* rmdir */
	NULL,			/* mknod */
	NULL,			/* rename */
	NULL,			/* readlink */
	NULL,			/* follow_link */
	NULL,			/* readpage */
	NULL,			/* writepage */
	NULL,			/* bmap */
	NULL,			/* truncate */
	NULL			/* permission */
};
1561 static ssize_t arraylong_read(struct file * file, char * buf,
1562 size_t count, loff_t *ppos)
1564 struct inode * inode = file->f_dentry->d_inode;
1565 unsigned int pid = inode->i_ino >> 16;
1566 unsigned int type = inode->i_ino & 0x0000ffff;
1568 switch (type) {
1569 case PROC_PID_MAPS:
1570 return read_maps(pid, file, buf, count, ppos);
1572 return -EINVAL;
/*
 * File operations for the multi-page /proc entries served by
 * arraylong_read() (currently /proc/<pid>/maps); only read() is
 * implemented.  (Pre-2.4 positional initializer.)
 */
static struct file_operations proc_arraylong_operations = {
	NULL,		/* array_lseek */
	arraylong_read,
	NULL,		/* array_write */
	NULL,		/* array_readdir */
	NULL,		/* array_poll */
	NULL,		/* array_ioctl */
	NULL,		/* mmap */
	NULL,		/* no special open code */
	NULL,		/* flush */
	NULL,		/* no special release code */
	NULL		/* can't fsync */
};
/*
 * Inode operations for the multi-page /proc array entries: only the
 * default file operations are supplied; all inode-level methods use
 * the VFS defaults.
 */
struct inode_operations proc_arraylong_inode_operations = {
	&proc_arraylong_operations,	/* default base directory file-ops */
	NULL,			/* create */
	NULL,			/* lookup */
	NULL,			/* link */
	NULL,			/* unlink */
	NULL,			/* symlink */
	NULL,			/* mkdir */
	NULL,			/* rmdir */
	NULL,			/* mknod */
	NULL,			/* rename */
	NULL,			/* readlink */
	NULL,			/* follow_link */
	NULL,			/* readpage */
	NULL,			/* writepage */
	NULL,			/* bmap */
	NULL,			/* truncate */
	NULL			/* permission */
};