/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat)
{
	struct zone *zones = pgdat->node_zones;
	int i;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		*active += zones[i].nr_active;
		*inactive += zones[i].nr_inactive;
		*free += zones[i].free_pages;
	}
}
void get_zone_counts(unsigned long *active,
		unsigned long *inactive, unsigned long *free)
{
	struct pglist_data *pgdat;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for_each_online_pgdat(pgdat) {
		unsigned long l, m, n;

		__get_zone_counts(&l, &m, &n, pgdat);
		*active += l;
		*inactive += m;
		*free += n;
	}
}
#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);
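/*
 * Illustrative note: these per-cpu vm_event_states are what the
 * count_vm_event()/count_vm_events() helpers from <linux/vmstat.h> bump,
 * e.g. a fault path doing
 *
 *	count_vm_event(PGFAULT);
 *
 * sum_vm_events() below folds the per-cpu arrays into one total.
 */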
static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
{
	int cpu = 0;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	cpu = first_cpu(*cpumask);
	while (cpu < NR_CPUS) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		cpu = next_cpu(cpu, *cpumask);

		if (cpu < NR_CPUS)		/* prefetch vm_event_states on the next cpu */
			prefetch(&per_cpu(vm_event_states, cpu));

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}
/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	sum_vm_events(ret, &cpu_online_map);
}
EXPORT_SYMBOL_GPL(all_vm_events);
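/*
 * Illustrative sketch of a caller (hypothetical snapshot buffer):
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	printk("major faults so far: %lu\n", events[PGMAJFAULT]);
 *
 * Other CPUs keep counting while the per-cpu arrays are summed, so such a
 * snapshot is only approximate.
 */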
#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */
/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);
#define STAT_THRESHOLD 32

/*
 * Determine pointer to currently valid differential byte given a zone and
 * the item number.
 *
 * Preemption must be off
 */
static inline s8 *diff_pointer(struct zone *zone, enum zone_stat_item item)
{
	return &zone_pcp(zone, smp_processor_id())->vm_stat_diff[item];
}
/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	s8 *p;
	long x;

	p = diff_pointer(zone, item);
	x = delta + *p;

	if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}

	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);
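/*
 * Worked example (illustrative): with STAT_THRESHOLD == 32, repeated
 *
 *	__mod_zone_page_state(zone, NR_FILE_PAGES, 1);
 *
 * calls only bump the local per-cpu differential until it holds 32. The
 * next call makes x == 33, which exceeds the threshold, so the accumulated
 * value is folded into the zone/global counters via zone_page_state_add()
 * and the differential restarts at 0. The shared atomic counters are thus
 * touched roughly once per 32 updates instead of on every update.
 */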
/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);
/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 *
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	s8 *p = diff_pointer(zone, item);

	(*p)++;

	if (unlikely(*p > STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
}
void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	struct zone *zone = page_zone(page);
	s8 *p = diff_pointer(zone, item);

	(*p)--;

	if (unlikely(*p < -STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
}
EXPORT_SYMBOL(__dec_zone_page_state);
void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);
void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;
	s8 *p;

	zone = page_zone(page);
	local_irq_save(flags);
	p = diff_pointer(zone, item);

	(*p)--;

	if (unlikely(*p < -STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);
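/*
 * Usage sketch (illustrative): callers that already run with interrupts
 * disabled use the __ variants; everyone else uses the plain ones, which
 * disable interrupts themselves:
 *
 *	__inc_zone_page_state(page, NR_PAGETABLE);	(interrupts already off)
 *	inc_zone_page_state(page, NR_PAGETABLE);	(unknown interrupt state)
 */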
/*
 * Update the zone counters for one cpu.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	unsigned long flags;

	for_each_zone(zone) {
		struct per_cpu_pageset *pcp;

		pcp = zone_pcp(zone, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (pcp->vm_stat_diff[i]) {
				local_irq_save(flags);
				zone_page_state_add(pcp->vm_stat_diff[i],
					zone, i);
				pcp->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
			}
	}
}
static void __refresh_cpu_vm_stats(void *dummy)
{
	refresh_cpu_vm_stats(smp_processor_id());
}

/*
 * Consolidate all counters.
 *
 * Note that the result is less inaccurate but still inaccurate
 * if concurrent processes are allowed to run.
 */
void refresh_vm_stats(void)
{
	on_each_cpu(__refresh_cpu_vm_stats, NULL, 0, 1);
}
EXPORT_SYMBOL(refresh_vm_stats);
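/*
 * Illustrative sketch: a reader that wants tighter global numbers can fold
 * the per-cpu differentials first, e.g.
 *
 *	refresh_vm_stats();
 *	nr_file = global_page_state(NR_FILE_PAGES);
 *
 * where nr_file is a hypothetical local. The value can still drift right
 * away since other CPUs keep updating their differentials.
 */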
/*
 * zonelist = the list of zones passed to the allocator
 * z	    = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
	if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
	}
	if (z->zone_pgdat == NODE_DATA(numa_node_id()))
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
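/*
 * Worked example (illustrative): an allocation whose zonelist prefers node 0
 * but that gets satisfied from a node 1 zone bumps NUMA_MISS on the node 1
 * zone and NUMA_FOREIGN on the preferred node 0 zone; NUMA_HIT is only
 * counted when the page comes from the preferred node. Independently,
 * NUMA_LOCAL or NUMA_OTHER records whether the zone belongs to the node the
 * allocating CPU is running on.
 */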
#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}
static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}
static void frag_stop(struct seq_file *m, void *arg)
{
}
/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;
	int order;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
		for (order = 0; order < MAX_ORDER; ++order)
			seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}
struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};
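/*
 * Sketch of how these tables are consumed (assumption about the proc side):
 * the proc filesystem wires fragmentation_op to /proc/buddyinfo, zoneinfo_op
 * below to /proc/zoneinfo and vmstat_op to /proc/vmstat.
 */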
static char *vmstat_text[] = {
	/* Zoned VM counters */
	"nr_page_table_pages",

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgscan_kswapd_dma32",
	"pgscan_kswapd_normal",
	"pgscan_kswapd_high",

	"pgscan_direct_dma32",
	"pgscan_direct_normal",
	"pgscan_direct_high",
#endif
};
/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
		int i;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
480 "\n scanned %lu (a: %lu i: %lu)"
490 zone
->nr_scan_active
, zone
->nr_scan_inactive
,
492 zone
->present_pages
);
		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
					zone_page_state(zone, i));
499 "\n protection: (%lu",
500 zone
->lowmem_reserve
[0]);
501 for (i
= 1; i
< ARRAY_SIZE(zone
->lowmem_reserve
); i
++)
502 seq_printf(m
, ", %lu", zone
->lowmem_reserve
[i
]);
		for_each_online_cpu(i) {
			struct per_cpu_pageset *pageset;
			int j;

			pageset = zone_pcp(zone, i);
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				if (pageset->pcp[j].count)
					break;
			}
			if (j == ARRAY_SIZE(pageset->pcp))
				continue;
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				seq_printf(m,
					   "\n    cpu: %i pcp: %i"
					   "\n              count: %i"
					   "\n              high:  %i"
					   "\n              batch: %i",
					   i, j,
					   pageset->pcp[j].count,
					   pageset->pcp[j].high,
					   pageset->pcp[j].batch);
			}
		}
530 "\n all_unreclaimable: %u"
531 "\n prev_priority: %i"
532 "\n temp_priority: %i"
534 zone
->all_unreclaimable
,
537 zone
->zone_start_pfn
);
538 spin_unlock_irqrestore(&zone
->lock
, flags
);
struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}
static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}
static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}
static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}
struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};
#endif /* CONFIG_PROC_FS */