/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
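
/*
 * Sum the active, inactive and free page counts over all zones of @pgdat.
 */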
void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat)
{
	struct zone *zones = pgdat->node_zones;
	int i;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		*active += zones[i].nr_active;
		*inactive += zones[i].nr_inactive;
		*free += zones[i].free_pages;
	}
}
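
/*
 * As above, but accumulated over every online node in the system.
 */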
void get_zone_counts(unsigned long *active,
		unsigned long *inactive, unsigned long *free)
{
	struct pglist_data *pgdat;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for_each_online_pgdat(pgdat) {
		unsigned long l, m, n;
		__get_zone_counts(&l, &m, &n, pgdat);
		*active += l;
		*inactive += m;
		*free += n;
	}
}

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);
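
/*
 * Add up the per-cpu event counts of every cpu in @cpumask into @ret.
 * While one cpu's counters are being summed, the next cpu's state is
 * prefetched to hide the cross-cpu cache miss latency.
 */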
static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
{
	int cpu = 0;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	cpu = first_cpu(*cpumask);
	while (cpu < NR_CPUS) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		cpu = next_cpu(cpu, *cpumask);

		if (cpu < NR_CPUS)
			prefetch(&per_cpu(vm_event_states, cpu));

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	sum_vm_events(ret, &cpu_online_map);
}
EXPORT_SYMBOL_GPL(all_vm_events);

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

#define STAT_THRESHOLD 32

/*
 * Determine pointer to currently valid differential byte given a zone and
 * the item number.
 *
 * Preemption must be off
 */
static inline s8 *diff_pointer(struct zone *zone, enum zone_stat_item item)
{
	return &zone_pcp(zone, smp_processor_id())->vm_stat_diff[item];
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	s8 *p;
	long x;

	p = diff_pointer(zone, item);
	x = delta + *p;

	if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}

	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 *
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	s8 *p = diff_pointer(zone, item);

	(*p)++;

	if (unlikely(*p > STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	struct zone *zone = page_zone(page);
	s8 *p = diff_pointer(zone, item);

	(*p)--;

	if (unlikely(*p < -STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
}
EXPORT_SYMBOL(__dec_zone_page_state);
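
/*
 * Interrupt safe variants of the above: local interrupts are disabled
 * around the per-cpu differential update, so these may be called from
 * any context.
 */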
void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;
	s8 *p;

	zone = page_zone(page);
	local_irq_save(flags);
	p = diff_pointer(zone, item);

	(*p)--;

	if (unlikely(*p < -STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	unsigned long flags;

	for_each_zone(zone) {
		struct per_cpu_pageset *pcp;

		pcp = zone_pcp(zone, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (pcp->vm_stat_diff[i]) {
				local_irq_save(flags);
				zone_page_state_add(pcp->vm_stat_diff[i],
					zone, i);
				pcp->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
			}
	}
}
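
/*
 * Callback for on_each_cpu(): fold the calling cpu's differentials into
 * the global counters.
 */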
static void __refresh_cpu_vm_stats(void *dummy)
{
	refresh_cpu_vm_stats(smp_processor_id());
}

/*
 * Consolidate all counters.
 *
 * Note that the result is less inaccurate but still inaccurate
 * if concurrent processes are allowed to run.
 */
void refresh_vm_stats(void)
{
	on_each_cpu(__refresh_cpu_vm_stats, NULL, 0, 1);
}
EXPORT_SYMBOL(refresh_vm_stats);

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z        = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
	if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
	}
	if (z->zone_pgdat == NODE_DATA(numa_node_id()))
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
#endif

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>
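
/*
 * seq_file start method: walk the list of online nodes and return the
 * pg_data_t at position *pos, or NULL once all nodes have been shown.
 */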
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;
	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;
	int order;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
		for (order = 0; order < MAX_ORDER; ++order)
			seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}

struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};
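
/*
 * Counter names reported through /proc/vmstat and /proc/zoneinfo.  The
 * order must match enum zone_stat_item followed by enum vm_event_item,
 * because vmstat_start() snapshots the counters into a flat array that
 * is indexed the same way.
 */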
static char *vmstat_text[] = {
	/* Zoned VM counters */
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_slab",
	"nr_page_table_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_unstable",
	"nr_bounce",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	"pgalloc_dma",
	"pgalloc_dma32",
	"pgalloc_normal",
	"pgalloc_high",

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	"pgrefill_dma",
	"pgrefill_dma32",
	"pgrefill_normal",
	"pgrefill_high",

	"pgsteal_dma",
	"pgsteal_dma32",
	"pgsteal_normal",
	"pgsteal_high",

	"pgscan_kswapd_dma",
	"pgscan_kswapd_dma32",
	"pgscan_kswapd_normal",
	"pgscan_kswapd_high",

	"pgscan_direct_dma",
	"pgscan_direct_dma32",
	"pgscan_direct_normal",
	"pgscan_direct_high",

	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
#endif
};

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
		int i;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
		seq_printf(m,
			   "\n  pages free     %lu"
			   "\n        min      %lu"
			   "\n        low      %lu"
			   "\n        high     %lu"
			   "\n        active   %lu"
			   "\n        inactive %lu"
			   "\n        scanned  %lu (a: %lu i: %lu)"
			   "\n        spanned  %lu"
			   "\n        present  %lu",
			   zone->free_pages,
			   zone->pages_min,
			   zone->pages_low,
			   zone->pages_high,
			   zone->nr_active,
			   zone->nr_inactive,
			   zone->pages_scanned,
			   zone->nr_scan_active, zone->nr_scan_inactive,
			   zone->spanned_pages,
			   zone->present_pages);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
					zone_page_state(zone, i));

		seq_printf(m,
			   "\n        protection: (%lu",
			   zone->lowmem_reserve[0]);
		for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
			seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
		seq_printf(m,
			   ")"
			   "\n  pagesets");
		for_each_online_cpu(i) {
			struct per_cpu_pageset *pageset;
			int j;

			pageset = zone_pcp(zone, i);
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				if (pageset->pcp[j].count)
					break;
			}
			if (j == ARRAY_SIZE(pageset->pcp))
				continue;
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				seq_printf(m,
					   "\n    cpu: %i pcp: %i"
					   "\n              count: %i"
					   "\n              high:  %i"
					   "\n              batch: %i",
					   i, j,
					   pageset->pcp[j].count,
					   pageset->pcp[j].high,
					   pageset->pcp[j].batch);
			}
		}
		seq_printf(m,
			   "\n  all_unreclaimable: %u"
			   "\n  prev_priority:     %i"
			   "\n  temp_priority:     %i"
			   "\n  start_pfn:         %lu",
			   zone->all_unreclaimable,
			   zone->prev_priority,
			   zone->temp_priority,
			   zone->zone_start_pfn);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}

struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};
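
/*
 * seq_file start method for /proc/vmstat: snapshot the global zone
 * counters (and, when configured, the vm event counters) into a private
 * buffer; the subsequent show calls index into that snapshot.
 */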
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

#endif /* CONFIG_PROC_FS */