proc: move /proc/devices code to fs/proc/devices.c
fs/proc/proc_misc.c
/*
 * linux/fs/proc/proc_misc.c
 *
 * linux/fs/proc/array.c
 * Copyright (C) 1992 by Linus Torvalds
 * based on ideas by Darren Senn
 *
 * This used to be the part of array.c. See the rest of history and credits
 * there. I took this into a separate file and switched the thing to generic
 * proc_file_inode_operations, leaving in array.c only per-process stuff.
 * Inumbers allocation made dynamic (via create_proc_entry()). AV, May 1999.
 *
 * Changes:
 * Fulton Green      : Encapsulated position metric calculations.
 *                     <kernel@FultonGreen.com>
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/mman.h>
#include <linux/quicklist.h>
#include <linux/proc_fs.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/times.h>
#include <linux/profile.h>
#include <linux/utsname.h>
#include <linux/blkdev.h>
#include <linux/hugetlb.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/pid_namespace.h>
#include <linux/bootmem.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/div64.h>
#include "internal.h"
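/*
 * Most entries below are thin seq_file wrappers: each open() attaches the
 * matching seq_operations, and the generic seq_read/seq_lseek/seq_release
 * helpers do the rest.
 */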
static int fragmentation_open(struct inode *inode, struct file *file)
{
        (void)inode;
        return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
        .open = fragmentation_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
        .open = pagetypeinfo_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
        .open = zoneinfo_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

extern const struct seq_operations cpuinfo_op;
static int cpuinfo_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &cpuinfo_op);
}

static const struct file_operations proc_cpuinfo_operations = {
        .open = cpuinfo_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
        .open = vmstat_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};
#ifdef CONFIG_BLOCK
static int partitions_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &partitions_op);
}

static const struct file_operations proc_partitions_operations = {
        .open = partitions_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static int diskstats_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &diskstats_op);
}

static const struct file_operations proc_diskstats_operations = {
        .open = diskstats_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};
#endif

#ifdef CONFIG_MODULES
extern const struct seq_operations modules_op;
static int modules_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &modules_op);
}

static const struct file_operations proc_modules_operations = {
        .open = modules_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};
#endif
#ifdef CONFIG_SLABINFO
static int slabinfo_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
        .open = slabinfo_open,
        .read = seq_read,
        .write = slabinfo_write,
        .llseek = seq_lseek,
        .release = seq_release,
};
#ifdef CONFIG_DEBUG_SLAB_LEAK
extern const struct seq_operations slabstats_op;
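/*
 * slabstats_open() hands the seq_file a PAGE_SIZE scratch buffer; the first
 * word records how many entries fit in it, and seq_release_private() frees
 * the buffer on close.
 */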
static int slabstats_open(struct inode *inode, struct file *file)
{
        unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
        int ret = -ENOMEM;
        if (n) {
                ret = seq_open(file, &slabstats_op);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        *n = PAGE_SIZE / (2 * sizeof(unsigned long));
                        m->private = n;
                        n = NULL;
                }
                kfree(n);
        }
        return ret;
}

static const struct file_operations proc_slabstats_operations = {
        .open = slabstats_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_private,
};
#endif
#endif
#ifdef CONFIG_MMU
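/*
 * On NUMA builds vmalloc_open() also allocates one counter per node and
 * stashes the array in seq_file->private so the vmallocinfo show code can
 * report per-node page counts; seq_release_private() frees it.
 */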
static int vmalloc_open(struct inode *inode, struct file *file)
{
        unsigned int *ptr = NULL;
        int ret;

        if (NUMA_BUILD)
                ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
        ret = seq_open(file, &vmalloc_op);
        if (!ret) {
                struct seq_file *m = file->private_data;
                m->private = ptr;
        } else
                kfree(ptr);
        return ret;
}

static const struct file_operations proc_vmalloc_operations = {
        .open = vmalloc_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_private,
};
#endif
#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif
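/*
 * show_stat() renders /proc/stat: aggregate and per-CPU times in USER_HZ
 * ticks, total and per-IRQ interrupt counts, context switches, boot time,
 * total forks, and the number of running and IO-blocked tasks.
 */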
static int show_stat(struct seq_file *p, void *v)
{
        int i, j;
        unsigned long jif;
        cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
        cputime64_t guest;
        u64 sum = 0;
        struct timespec boottime;
        unsigned int per_irq_sum;

        user = nice = system = idle = iowait =
                irq = softirq = steal = cputime64_zero;
        guest = cputime64_zero;
        getboottime(&boottime);
        jif = boottime.tv_sec;

        for_each_possible_cpu(i) {
                user = cputime64_add(user, kstat_cpu(i).cpustat.user);
                nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
                system = cputime64_add(system, kstat_cpu(i).cpustat.system);
                idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
                iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
                irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
                softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
                steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
                guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);

                for_each_irq_nr(j)
                        sum += kstat_irqs_cpu(j, i);

                sum += arch_irq_stat_cpu(i);
        }
        sum += arch_irq_stat();

        seq_printf(p, "cpu  %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
                (unsigned long long)cputime64_to_clock_t(user),
                (unsigned long long)cputime64_to_clock_t(nice),
                (unsigned long long)cputime64_to_clock_t(system),
                (unsigned long long)cputime64_to_clock_t(idle),
                (unsigned long long)cputime64_to_clock_t(iowait),
                (unsigned long long)cputime64_to_clock_t(irq),
                (unsigned long long)cputime64_to_clock_t(softirq),
                (unsigned long long)cputime64_to_clock_t(steal),
                (unsigned long long)cputime64_to_clock_t(guest));
        for_each_online_cpu(i) {
                /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
                user = kstat_cpu(i).cpustat.user;
                nice = kstat_cpu(i).cpustat.nice;
                system = kstat_cpu(i).cpustat.system;
                idle = kstat_cpu(i).cpustat.idle;
                iowait = kstat_cpu(i).cpustat.iowait;
                irq = kstat_cpu(i).cpustat.irq;
                softirq = kstat_cpu(i).cpustat.softirq;
                steal = kstat_cpu(i).cpustat.steal;
                guest = kstat_cpu(i).cpustat.guest;
                seq_printf(p,
                        "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
                        i,
                        (unsigned long long)cputime64_to_clock_t(user),
                        (unsigned long long)cputime64_to_clock_t(nice),
                        (unsigned long long)cputime64_to_clock_t(system),
                        (unsigned long long)cputime64_to_clock_t(idle),
                        (unsigned long long)cputime64_to_clock_t(iowait),
                        (unsigned long long)cputime64_to_clock_t(irq),
                        (unsigned long long)cputime64_to_clock_t(softirq),
                        (unsigned long long)cputime64_to_clock_t(steal),
                        (unsigned long long)cputime64_to_clock_t(guest));
        }
        seq_printf(p, "intr %llu", (unsigned long long)sum);

        /* sum again ? it could be updated? */
        for_each_irq_nr(j) {
                per_irq_sum = 0;

                for_each_possible_cpu(i)
                        per_irq_sum += kstat_irqs_cpu(j, i);

                seq_printf(p, " %u", per_irq_sum);
        }

        seq_printf(p,
                "\nctxt %llu\n"
                "btime %lu\n"
                "processes %lu\n"
                "procs_running %lu\n"
                "procs_blocked %lu\n",
                nr_context_switches(),
                (unsigned long)jif,
                total_forks,
                nr_running(),
                nr_iowait());

        return 0;
}
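/*
 * stat_open() pre-sizes the seq_file buffer (4 KB plus 4 KB per 32 possible
 * CPUs, capped at the 128 KB kmalloc limit) so show_stat() normally completes
 * in a single pass instead of being re-run with a larger buffer.
 */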
static int stat_open(struct inode *inode, struct file *file)
{
        unsigned size = 4096 * (1 + num_possible_cpus() / 32);
        char *buf;
        struct seq_file *m;
        int res;

        /* don't ask for more than the kmalloc() max size, currently 128 KB */
        if (size > 128 * 1024)
                size = 128 * 1024;
        buf = kmalloc(size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        res = single_open(file, show_stat, NULL);
        if (!res) {
                m = file->private_data;
                m->buf = buf;
                m->size = size;
        } else
                kfree(buf);
        return res;
}

static const struct file_operations proc_stat_operations = {
        .open = stat_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
/*
 * /proc/interrupts
 */
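/*
 * The iterator simply walks IRQ numbers 0..nr_irqs; rendering each row is
 * left to the architecture's show_interrupts().
 */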
static void *int_seq_start(struct seq_file *f, loff_t *pos)
{
        return (*pos <= nr_irqs) ? pos : NULL;
}

static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
        (*pos)++;
        return (*pos <= nr_irqs) ? pos : NULL;
}

static void int_seq_stop(struct seq_file *f, void *v)
{
        /* Nothing to do */
}

static const struct seq_operations int_seq_ops = {
        .start = int_seq_start,
        .next = int_seq_next,
        .stop = int_seq_stop,
        .show = show_interrupts
};

static int interrupts_open(struct inode *inode, struct file *filp)
{
        return seq_open(filp, &int_seq_ops);
}

static const struct file_operations proc_interrupts_operations = {
        .open = interrupts_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};
#ifdef CONFIG_PROC_PAGE_MONITOR
#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
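/*
 * Userspace sketch (illustrative only, not part of the kernel build): the
 * mapcount of a given pfn can be read by seeking to pfn * sizeof(u64), e.g.
 *
 *        int fd = open("/proc/kpagecount", O_RDONLY);
 *        uint64_t count;
 *        pread(fd, &count, sizeof(count), (off_t)pfn * sizeof(uint64_t));
 *
 * (error handling omitted; pfn must be below max_pfn)
 */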
static ssize_t kpagecount_read(struct file *file, char __user *buf,
                               size_t count, loff_t *ppos)
{
        u64 __user *out = (u64 __user *)buf;
        struct page *ppage;
        unsigned long src = *ppos;
        unsigned long pfn;
        ssize_t ret = 0;
        u64 pcount;

        pfn = src / KPMSIZE;
        count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
        if (src & KPMMASK || count & KPMMASK)
                return -EINVAL;

        while (count > 0) {
                ppage = NULL;
                if (pfn_valid(pfn))
                        ppage = pfn_to_page(pfn);
                pfn++;
                if (!ppage)
                        pcount = 0;
                else
                        pcount = page_mapcount(ppage);

                if (put_user(pcount, out++)) {
                        ret = -EFAULT;
                        break;
                }

                count -= KPMSIZE;
        }

        *ppos += (char __user *)out - buf;
        if (!ret)
                ret = (char __user *)out - buf;
        return ret;
}

static struct file_operations proc_kpagecount_operations = {
        .llseek = mem_lseek,
        .read = kpagecount_read,
};
/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

/* These macros are used to decouple internal flags from exported ones */
#define KPF_LOCKED     0
#define KPF_ERROR      1
#define KPF_REFERENCED 2
#define KPF_UPTODATE   3
#define KPF_DIRTY      4
#define KPF_LRU        5
#define KPF_ACTIVE     6
#define KPF_SLAB       7
#define KPF_WRITEBACK  8
#define KPF_RECLAIM    9
#define KPF_BUDDY     10
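/* copy bit 'srcpos' of 'flags' into bit 'dstpos' of the result */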
#define kpf_copy_bit(flags, dstpos, srcpos) (((flags >> srcpos) & 1) << dstpos)
static ssize_t kpageflags_read(struct file *file, char __user *buf,
                               size_t count, loff_t *ppos)
{
        u64 __user *out = (u64 __user *)buf;
        struct page *ppage;
        unsigned long src = *ppos;
        unsigned long pfn;
        ssize_t ret = 0;
        u64 kflags, uflags;

        pfn = src / KPMSIZE;
        count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
        if (src & KPMMASK || count & KPMMASK)
                return -EINVAL;

        while (count > 0) {
                ppage = NULL;
                if (pfn_valid(pfn))
                        ppage = pfn_to_page(pfn);
                pfn++;
                if (!ppage)
                        kflags = 0;
                else
                        kflags = ppage->flags;

                uflags = kpf_copy_bit(kflags, KPF_LOCKED, PG_locked) |
                        kpf_copy_bit(kflags, KPF_ERROR, PG_error) |
                        kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) |
                        kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) |
                        kpf_copy_bit(kflags, KPF_DIRTY, PG_dirty) |
                        kpf_copy_bit(kflags, KPF_LRU, PG_lru) |
                        kpf_copy_bit(kflags, KPF_ACTIVE, PG_active) |
                        kpf_copy_bit(kflags, KPF_SLAB, PG_slab) |
                        kpf_copy_bit(kflags, KPF_WRITEBACK, PG_writeback) |
                        kpf_copy_bit(kflags, KPF_RECLAIM, PG_reclaim) |
                        kpf_copy_bit(kflags, KPF_BUDDY, PG_buddy);

                if (put_user(uflags, out++)) {
                        ret = -EFAULT;
                        break;
                }

                count -= KPMSIZE;
        }

        *ppos += (char __user *)out - buf;
        if (!ret)
                ret = (char __user *)out - buf;
        return ret;
}

static struct file_operations proc_kpageflags_operations = {
        .llseek = mem_lseek,
        .read = kpageflags_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
struct proc_dir_entry *proc_root_kcore;
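/*
 * Register the remaining top-level /proc entries at boot.  Most entries use
 * the default mode; kcore, vmallocinfo, vmcore and the kpage* files are
 * restricted to root, and slabinfo is additionally writable by root for
 * cache tuning.
 */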
void __init proc_misc_init(void)
{
        proc_symlink("mounts", NULL, "self/mounts");

        /* And now for trickier ones */
        proc_create("cpuinfo", 0, NULL, &proc_cpuinfo_operations);
#ifdef CONFIG_BLOCK
        proc_create("partitions", 0, NULL, &proc_partitions_operations);
#endif
        proc_create("stat", 0, NULL, &proc_stat_operations);
        proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
#ifdef CONFIG_SLABINFO
        proc_create("slabinfo", S_IWUSR|S_IRUGO, NULL, &proc_slabinfo_operations);
#ifdef CONFIG_DEBUG_SLAB_LEAK
        proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
#endif
#ifdef CONFIG_MMU
        proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
#endif
        proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
        proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
        proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
        proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#ifdef CONFIG_BLOCK
        proc_create("diskstats", 0, NULL, &proc_diskstats_operations);
#endif
#ifdef CONFIG_MODULES
        proc_create("modules", 0, NULL, &proc_modules_operations);
#endif
#ifdef CONFIG_SCHEDSTATS
        proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
#endif
#ifdef CONFIG_PROC_KCORE
        proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &proc_kcore_operations);
        if (proc_root_kcore)
                proc_root_kcore->size =
                        (size_t)high_memory - PAGE_OFFSET + PAGE_SIZE;
#endif
#ifdef CONFIG_PROC_PAGE_MONITOR
        proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
        proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
#endif
#ifdef CONFIG_PROC_VMCORE
        proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
#endif
}