[PATCH] zoned-vm-counters: remove read_page_state()
mm/vmstat.c
/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
/*
 * Accumulate the page_state information across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
DEFINE_PER_CPU(struct page_state, page_states) = {0};
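/*
 * __get_page_state() below relies on struct page_state consisting of
 * nothing but unsigned long counters: it sums each CPU's instance as a
 * flat array of unsigned longs.
 */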
static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
{
	unsigned cpu;

	memset(ret, 0, nr * sizeof(unsigned long));
	cpus_and(*cpumask, *cpumask, cpu_online_map);

	for_each_cpu_mask(cpu, *cpumask) {
		unsigned long *in;
		unsigned long *out;
		unsigned off;
		unsigned next_cpu;

		in = (unsigned long *)&per_cpu(page_states, cpu);

		next_cpu = next_cpu(cpu, *cpumask);
		if (likely(next_cpu < NR_CPUS))
			prefetch(&per_cpu(page_states, next_cpu));

		out = (unsigned long *)ret;
		for (off = 0; off < nr; off++)
			*out++ += *in++;
	}
}
void get_full_page_state(struct page_state *ret)
{
	cpumask_t mask = CPU_MASK_ALL;

	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
}
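/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * snapshot the event counters across all online CPUs and read one field.
 * Assumes struct page_state exposes a pgfault counter, as the vmstat_text
 * table below suggests.
 */
#if 0
static unsigned long example_total_page_faults(void)
{
	struct page_state ps;

	get_full_page_state(&ps);	/* sum every CPU's counters */
	return ps.pgfault;		/* total minor + major faults */
}
#endif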
void __mod_page_state_offset(unsigned long offset, unsigned long delta)
{
	void *ptr;

	ptr = &__get_cpu_var(page_states);
	*(unsigned long *)(ptr + offset) += delta;
}
EXPORT_SYMBOL(__mod_page_state_offset);
void mod_page_state_offset(unsigned long offset, unsigned long delta)
{
	unsigned long flags;
	void *ptr;

	local_irq_save(flags);
	ptr = &__get_cpu_var(page_states);
	*(unsigned long *)(ptr + offset) += delta;
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_page_state_offset);
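/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * the offset-based interface takes the byte offset of a counter within
 * struct page_state, typically computed with offsetof().
 */
#if 0
static void example_count_pgrotated(void)
{
	mod_page_state_offset(offsetof(struct page_state, pgrotated), 1);
}
#endif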
void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat)
{
	struct zone *zones = pgdat->node_zones;
	int i;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		*active += zones[i].nr_active;
		*inactive += zones[i].nr_inactive;
		*free += zones[i].free_pages;
	}
}
void get_zone_counts(unsigned long *active,
		unsigned long *inactive, unsigned long *free)
{
	struct pglist_data *pgdat;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for_each_online_pgdat(pgdat) {
		unsigned long l, m, n;

		__get_zone_counts(&l, &m, &n, pgdat);
		*active += l;
		*inactive += m;
		*free += n;
	}
}
/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);
#ifdef CONFIG_SMP

#define STAT_THRESHOLD 32
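/*
 * Each CPU keeps a small signed differential (s8) per zone counter and
 * only folds it into the global atomic in vm_stat[] once its absolute
 * value exceeds STAT_THRESHOLD. With a threshold of 32, each fold
 * absorbs 33 single-page increments, so e.g. 1000 increments on one CPU
 * touch the shared atomic only about 30 times instead of 1000 times.
 */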
/*
 * Determine pointer to currently valid differential byte given a zone and
 * the item number.
 *
 * Preemption must be off.
 */
static inline s8 *diff_pointer(struct zone *zone, enum zone_stat_item item)
{
	return &zone_pcp(zone, smp_processor_id())->vm_stat_diff[item];
}
/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	s8 *p;
	long x;

	p = diff_pointer(zone, item);
	x = delta + *p;

	if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}

	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);
/*
 * For an unknown interrupt state.
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);
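/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * account a newly allocated page table page. NR_PAGETABLE is assumed to
 * be the enum zone_stat_item behind "nr_page_table_pages" below.
 */
#if 0
static void example_account_pte_page(struct page *page)
{
	mod_zone_page_state(page_zone(page), NR_PAGETABLE, 1);
}
#endif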
/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 *
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	struct zone *zone = page_zone(page);
	s8 *p = diff_pointer(zone, item);

	(*p)++;

	if (unlikely(*p > STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
}
EXPORT_SYMBOL(__inc_zone_page_state);
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	struct zone *zone = page_zone(page);
	s8 *p = diff_pointer(zone, item);

	(*p)--;

	if (unlikely(*p < -STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
}
EXPORT_SYMBOL(__dec_zone_page_state);
void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;
	s8 *p;

	zone = page_zone(page);
	local_irq_save(flags);
	p = diff_pointer(zone, item);

	(*p)++;

	if (unlikely(*p > STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);
void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;
	s8 *p;

	zone = page_zone(page);
	local_irq_save(flags);
	p = diff_pointer(zone, item);

	(*p)--;

	if (unlikely(*p < -STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);
/*
 * Update the zone counters for one cpu.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	unsigned long flags;

	for_each_zone(zone) {
		struct per_cpu_pageset *pcp;

		pcp = zone_pcp(zone, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (pcp->vm_stat_diff[i]) {
				local_irq_save(flags);
				zone_page_state_add(pcp->vm_stat_diff[i],
					zone, i);
				pcp->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
			}
	}
}
static void __refresh_cpu_vm_stats(void *dummy)
{
	refresh_cpu_vm_stats(smp_processor_id());
}

/*
 * Consolidate all counters.
 *
 * Note that the result is more accurate after this refresh, but still
 * approximate while concurrent processes are allowed to run.
 */
void refresh_vm_stats(void)
{
	on_each_cpu(__refresh_cpu_vm_stats, NULL, 0, 1);
}
EXPORT_SYMBOL(refresh_vm_stats);

#endif
#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}
/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;
	int order;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
		for (order = 0; order < MAX_ORDER; ++order)
			seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}
struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};
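/*
 * Illustrative glue (lives outside this file; the names here are a
 * hedged sketch, not this kernel's code): a seq_operations table like
 * fragmentation_op is bound to a /proc file through an open handler
 * that calls seq_open().
 */
#if 0
static int example_fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static struct file_operations example_fragmentation_fops = {
	.open		= example_fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif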
static char *vmstat_text[] = {
	/* Zoned VM counters */
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_slab",
	"nr_page_table_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_unstable",
	"nr_bounce",

	/* Event counters */
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	"pgalloc_high",
	"pgalloc_normal",
	"pgalloc_dma32",
	"pgalloc_dma",

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	"pgrefill_high",
	"pgrefill_normal",
	"pgrefill_dma32",
	"pgrefill_dma",

	"pgsteal_high",
	"pgsteal_normal",
	"pgsteal_dma32",
	"pgsteal_dma",

	"pgscan_kswapd_high",
	"pgscan_kswapd_normal",
	"pgscan_kswapd_dma32",
	"pgscan_kswapd_dma",

	"pgscan_direct_high",
	"pgscan_direct_normal",
	"pgscan_direct_dma32",
	"pgscan_direct_dma",

	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
};
/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
		int i;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
		seq_printf(m,
			   "\n  pages free     %lu"
			   "\n        min      %lu"
			   "\n        low      %lu"
			   "\n        high     %lu"
			   "\n        active   %lu"
			   "\n        inactive %lu"
			   "\n        scanned  %lu (a: %lu i: %lu)"
			   "\n        spanned  %lu"
			   "\n        present  %lu",
			   zone->free_pages,
			   zone->pages_min,
			   zone->pages_low,
			   zone->pages_high,
			   zone->nr_active,
			   zone->nr_inactive,
			   zone->pages_scanned,
			   zone->nr_scan_active, zone->nr_scan_inactive,
			   zone->spanned_pages,
			   zone->present_pages);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
					zone_page_state(zone, i));

		seq_printf(m,
			   "\n        protection: (%lu",
			   zone->lowmem_reserve[0]);
		for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
			seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
		seq_printf(m,
			   ")"
			   "\n  pagesets");
		for_each_online_cpu(i) {
			struct per_cpu_pageset *pageset;
			int j;

			pageset = zone_pcp(zone, i);
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				if (pageset->pcp[j].count)
					break;
			}
			if (j == ARRAY_SIZE(pageset->pcp))
				continue;
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				seq_printf(m,
					   "\n    cpu: %i pcp: %i"
					   "\n              count: %i"
					   "\n              high:  %i"
					   "\n              batch: %i",
					   i, j,
					   pageset->pcp[j].count,
					   pageset->pcp[j].high,
					   pageset->pcp[j].batch);
			}
#ifdef CONFIG_NUMA
			seq_printf(m,
				   "\n            numa_hit:       %lu"
				   "\n            numa_miss:      %lu"
				   "\n            numa_foreign:   %lu"
				   "\n            interleave_hit: %lu"
				   "\n            local_node:     %lu"
				   "\n            other_node:     %lu",
				   pageset->numa_hit,
				   pageset->numa_miss,
				   pageset->numa_foreign,
				   pageset->interleave_hit,
				   pageset->local_node,
				   pageset->other_node);
#endif
		}
		seq_printf(m,
			   "\n  all_unreclaimable: %u"
			   "\n  prev_priority:     %i"
			   "\n  temp_priority:     %i"
			   "\n  start_pfn:         %lu",
			   zone->all_unreclaimable,
			   zone->prev_priority,
			   zone->temp_priority,
			   zone->zone_start_pfn);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}
struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
	struct page_state *ps;
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(*ps), GFP_KERNEL);
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
	ps = (struct page_state *)(v + NR_VM_ZONE_STAT_ITEMS);
	get_full_page_state(ps);
	ps->pgpgin /= 2;		/* sectors -> kbytes */
	ps->pgpgout /= 2;
	return v + *pos;
}
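/*
 * pgpgin/pgpgout are accumulated in 512-byte sectors; two sectors make
 * one kilobyte, hence the division by two above before reporting.
 */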
static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}
static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}
static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}
struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};
#endif /* CONFIG_PROC_FS */