/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/vmstat.h>
#include <linux/sched.h>
#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);
static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_cpu(cpu, cpumask) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}
/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret, cpu_online_mask);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);
#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */
/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);
#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */
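	/*
	 * Worked example (added for illustration, numbers derived from the
	 * formula below): with 2 online CPUs (fls = 2) and a roughly 1 GB
	 * zone (mem = 8 units of 128 MB, fls(mem) = 4), the threshold is
	 * 2 * 2 * (1 + 4) = 20, matching the "1-2 GB" row above.
	 */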
	mem = zone->present_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_populated_zone(zone) {
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_threshold(zone);

		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}
/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);

	s8 *p = pcp->vm_stat_diff + item;
	long x;

	x = delta + *p;

	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);
/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);
/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
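/*
 * Overstep example (added for illustration): with stat_threshold = 32, a
 * per-cpu differential that reaches 33 is folded into the zone counter as
 * 33 + 16 and the differential restarts at -16, leaving roughly half a
 * threshold of headroom before the next fold in either direction.
 */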
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
	s8 *p = pcp->vm_stat_diff + item;

	(*p)++;

	if (unlikely(*p > pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p + overstep, zone, item);
		*p = -overstep;
	}
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);
void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
	s8 *p = pcp->vm_stat_diff + item;

	(*p)--;

	if (unlikely(*p < -pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p - overstep, zone, item);
		*p = overstep;
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);
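/*
 * Interrupt-safe wrappers: same as the __inc/__dec helpers above, but with
 * local interrupts disabled around the update.
 */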
void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);
/*
 * Update the zone counters for one cpu.
 *
 * The cpu specified must be either the current cpu or a processor that
 * is not online. If it is the current cpu then the execution thread must
 * be pinned to the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				unsigned long flags;
				int v;

				local_irq_save(flags);
				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				p->expire = 3;
#endif
			}
		cond_resched();
#ifdef CONFIG_NUMA
		/*
		 * Deal with draining the remote pageset of this
		 * processor
		 *
		 * Check if there are pages remaining in this pageset
		 * if not then there is nothing to expire.
		 */
		if (!p->expire || !p->pcp.count)
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			p->expire = 0;
			continue;
		}

		p->expire--;
		if (p->expire)
			continue;

		if (p->pcp.count)
			drain_zone_pages(zone, &p->pcp);
#endif
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (global_diff[i])
			atomic_long_add(global_diff[i], &vm_stat[i]);
}

#endif
#ifdef CONFIG_NUMA
/*
 * preferred_zone = the zone that the allocator preferred
 * z              = the zone from which the allocation actually occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z)
{
	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
	}
	if (z->node == numa_node_id())
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
#endif
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Reclaimable",
	"Movable",
	"Reserve",
	"Isolate",
};
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;
	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}
/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}
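/* Emit one /proc/buddyinfo line: free block counts per order for one zone */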
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, frag_show_print);
	return 0;
}
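/*
 * Emit one /proc/pagetypeinfo row per migrate type: the number of free
 * blocks at each order, counted by walking the zone's free lists.
 */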
static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}
/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

	return 0;
}
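/* Count how many pageblocks in the zone belong to each migrate type */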
static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = start_pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}
/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

	return 0;
}
/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);

	return 0;
}
static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
	.open		= pagetypeinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",
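/*
 * Counter names for /proc/vmstat and /proc/zoneinfo. The ordering must
 * match enum zone_stat_item, followed (when CONFIG_VM_EVENT_COUNTERS is
 * set) by the vm event counters.
 */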
static const char * const vmstat_text[] = {
	/* Zoned VM counters */
	"nr_free_pages",
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
	"nr_unevictable",
	"nr_mlock",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_kernel_stack",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",
	"nr_writeback_temp",
	"nr_isolated_anon",
	"nr_isolated_file",
	"nr_shmem",
#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

#ifdef CONFIG_NUMA
	"zone_reclaim_failed",
#endif
	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",
	"kswapd_skip_congestion_wait",
	"pageoutrun",
	"allocstall",

	"pgrotated",
#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",
	"unevictable_pgs_mlockfreed",
#endif
};
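/*
 * Body of /proc/zoneinfo: watermarks, per-zone counters and the per-cpu
 * pageset state for a single zone.
 */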
static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        scanned  %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu",
		   zone_nr_free_pages(zone),
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   zone->pages_scanned,
		   zone->spanned_pages,
		   zone->present_pages);

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m,
		   "\n        protection: (%lu",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
	seq_printf(m,
		   ")"
		   "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = per_cpu_ptr(zone->pageset, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  all_unreclaimable: %u"
		   "\n  prev_priority:     %i"
		   "\n  start_pfn:         %lu"
		   "\n  inactive_ratio:    %u",
		   zone->all_unreclaimable,
		   zone->prev_priority,
		   zone->zone_start_pfn,
		   zone->inactive_ratio);
	seq_putc(m, '\n');
}
/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
	.open		= zoneinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
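/*
 * Snapshot all zone counters (and, if configured, the vm event counters)
 * into a single buffer indexed like vmstat_text, so that seq_file can walk
 * it entry by entry.
 */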
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}
static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
	.open		= vmstat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;
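/*
 * Per-cpu worker: fold this cpu's counter differentials into the zone and
 * global counters, then re-arm itself after sysctl_stat_interval jiffies.
 */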
static void vmstat_update(struct work_struct *w)
{
	refresh_cpu_vm_stats(smp_processor_id());
	schedule_delayed_work(&__get_cpu_var(vmstat_work),
		round_jiffies_relative(sysctl_stat_interval));
}

static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *work = &per_cpu(vmstat_work, cpu);

	INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
}
/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		node_set_state(cpu_to_node(cpu), N_CPU);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
		per_cpu(vmstat_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };
#endif
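/*
 * Boot-time setup: recalculate the per-cpu thresholds, register the cpu
 * hotplug notifier, start the per-cpu update timers and create the /proc
 * interfaces.
 */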
static int __init setup_vmstat(void)
{
#ifdef CONFIG_SMP
	int cpu;

	refresh_zone_stat_thresholds();
	register_cpu_notifier(&vmstat_notifier);

	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);
#endif
#ifdef CONFIG_PROC_FS
	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
	return 0;
}
module_init(setup_vmstat)