/*
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
/*
 * Accumulate the page_state information across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
DEFINE_PER_CPU(struct page_state, page_states) = {0};
static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
{
	int cpu;

	memset(ret, 0, nr * sizeof(unsigned long));
	cpus_and(*cpumask, *cpumask, cpu_online_map);

	for_each_cpu_mask(cpu, *cpumask) {
		unsigned long *in;
		unsigned long *out;
		unsigned off;
		unsigned next_cpu;

		in = (unsigned long *)&per_cpu(page_states, cpu);

		next_cpu = next_cpu(cpu, *cpumask);
		if (likely(next_cpu < NR_CPUS))
			prefetch(&per_cpu(page_states, next_cpu));

		out = (unsigned long *)ret;
		for (off = 0; off < nr; off++)
			*out++ += *in++;
	}
}
void get_full_page_state(struct page_state *ret)
{
	cpumask_t mask = CPU_MASK_ALL;

	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
}
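/*
 * Illustrative usage sketch (not part of this file; the function name is
 * hypothetical): a caller takes an approximate snapshot of the event
 * counters on the stack and then reads individual struct page_state members.
 */
static inline void example_dump_page_state(void)
{
	struct page_state ps;

	get_full_page_state(&ps);
	printk(KERN_DEBUG "pgpgin: %lu\n", ps.pgpgin);
}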
void __mod_page_state_offset(unsigned long offset, unsigned long delta)
{
	void *ptr;

	ptr = &__get_cpu_var(page_states);
	*(unsigned long *)(ptr + offset) += delta;
}
EXPORT_SYMBOL(__mod_page_state_offset);
void mod_page_state_offset(unsigned long offset, unsigned long delta)
{
	unsigned long flags;
	void *ptr;

	local_irq_save(flags);
	ptr = &__get_cpu_var(page_states);
	*(unsigned long *)(ptr + offset) += delta;
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_page_state_offset);
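/*
 * Illustrative sketch (the wrappers live in a header, not in this file):
 * callers normally do not pass a raw byte offset. Convenience macros compute
 * the offset from the struct page_state member name, roughly along these
 * lines, so that a member can be bumped with a single per-cpu add:
 *
 *	#define mod_page_state(member, delta)	\
 *		mod_page_state_offset(offsetof(struct page_state, member), (delta))
 */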
void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat)
{
	struct zone *zones = pgdat->node_zones;
	int i;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		*active += zones[i].nr_active;
		*inactive += zones[i].nr_inactive;
		*free += zones[i].free_pages;
	}
}
void get_zone_counts(unsigned long *active,
		unsigned long *inactive, unsigned long *free)
{
	struct pglist_data *pgdat;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for_each_online_pgdat(pgdat) {
		unsigned long l, m, n;
		__get_zone_counts(&l, &m, &n, pgdat);
		*active += l;
		*inactive += m;
		*free += n;
	}
}
/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);
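/*
 * Illustrative sketch (hypothetical helper; it assumes NR_PAGETABLE, the item
 * behind "nr_page_table_pages" further below, is a valid zone_stat_item):
 * consumers read the consolidated counters through global_page_state(),
 * which returns the vm_stat[] value for one item.
 */
static inline unsigned long example_nr_pagetable_pages(void)
{
	return global_page_state(NR_PAGETABLE);
}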
#define STAT_THRESHOLD 32

/*
 * Determine pointer to currently valid differential byte given a zone and
 * the item number.
 *
 * Preemption must be off
 */
static inline s8 *diff_pointer(struct zone *zone, enum zone_stat_item item)
{
	return &zone_pcp(zone, smp_processor_id())->vm_stat_diff[item];
}
/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	s8 *p;
	long x;

	p = diff_pointer(zone, item);
	x = delta + *p;

	if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}

	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);
/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);
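/*
 * Illustrative usage sketch (hypothetical helper; the stat item is only an
 * example): a caller that does not know the interrupt state adjusts a zoned
 * counter through the irq-safe variant above.
 */
static inline void example_mod_pagetable_count(struct page *page, int delta)
{
	mod_zone_page_state(page_zone(page), NR_PAGETABLE, delta);
}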
/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 *
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	struct zone *zone = page_zone(page);
	s8 *p = diff_pointer(zone, item);

	(*p)++;

	if (unlikely(*p > STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
}
EXPORT_SYMBOL(__inc_zone_page_state);
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	struct zone *zone = page_zone(page);
	s8 *p = diff_pointer(zone, item);

	(*p)--;

	if (unlikely(*p < -STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
}
EXPORT_SYMBOL(__dec_zone_page_state);
void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;
	s8 *p;

	zone = page_zone(page);
	local_irq_save(flags);
	p = diff_pointer(zone, item);

	(*p)++;

	if (unlikely(*p > STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);
void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;
	s8 *p;

	zone = page_zone(page);
	local_irq_save(flags);
	p = diff_pointer(zone, item);

	(*p)--;

	if (unlikely(*p < -STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);
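/*
 * Illustrative usage sketch (hypothetical helpers; the stat item is only an
 * example): the __ variants above require interrupts to be disabled already,
 * while the plain variants disable and restore interrupts themselves.
 */
static inline void example_account_pgtable_alloc(struct page *page)
{
	inc_zone_page_state(page, NR_PAGETABLE);
}

static inline void example_account_pgtable_free(struct page *page)
{
	dec_zone_page_state(page, NR_PAGETABLE);
}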
/*
 * Update the zone counters for one cpu.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	unsigned long flags;

	for_each_zone(zone) {
		struct per_cpu_pageset *pcp;

		pcp = zone_pcp(zone, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (pcp->vm_stat_diff[i]) {
				local_irq_save(flags);
				zone_page_state_add(pcp->vm_stat_diff[i],
					zone, i);
				pcp->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
			}
	}
}
static void __refresh_cpu_vm_stats(void *dummy)
{
	refresh_cpu_vm_stats(smp_processor_id());
}
/*
 * Consolidate all counters.
 *
 * Note that the result is less inaccurate but still inaccurate
 * if concurrent processes are allowed to run.
 */
void refresh_vm_stats(void)
{
	on_each_cpu(__refresh_cpu_vm_stats, NULL, 0, 1);
}
EXPORT_SYMBOL(refresh_vm_stats);
#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}
static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}
static void frag_stop(struct seq_file *m, void *arg)
{
}
/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;
	int order;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
		for (order = 0; order < MAX_ORDER; ++order)
			seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}
struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};
static char *vmstat_text[] = {
	/* Zoned VM counters */
	"nr_page_table_pages",

	"pgscan_kswapd_high",
	"pgscan_kswapd_normal",
	"pgscan_kswapd_dma32",

	"pgscan_direct_high",
	"pgscan_direct_normal",
	"pgscan_direct_dma32",
/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
		int i;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
		seq_printf(m,
			   "\n  pages free     %lu"
			   "\n        min      %lu"
			   "\n        low      %lu"
			   "\n        high     %lu"
			   "\n        active   %lu"
			   "\n        inactive %lu"
			   "\n        scanned  %lu (a: %lu i: %lu)"
			   "\n        spanned  %lu"
			   "\n        present  %lu",
			   zone->free_pages,
			   zone->pages_min,
			   zone->pages_low,
			   zone->pages_high,
			   zone->nr_active,
			   zone->nr_inactive,
			   zone->pages_scanned,
			   zone->nr_scan_active, zone->nr_scan_inactive,
			   zone->spanned_pages,
			   zone->present_pages);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
					zone_page_state(zone, i));

		seq_printf(m,
			   "\n        protection: (%lu",
			   zone->lowmem_reserve[0]);
		for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
			seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
		seq_printf(m,
			   ")"
			   "\n  pagesets");
		for_each_online_cpu(i) {
			struct per_cpu_pageset *pageset;
			int j;

			pageset = zone_pcp(zone, i);
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				if (pageset->pcp[j].count)
					break;
			}
			if (j == ARRAY_SIZE(pageset->pcp))
				continue;
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				seq_printf(m,
					   "\n    cpu: %i pcp: %i"
					   "\n              count: %i"
					   "\n              high:  %i"
					   "\n              batch: %i",
					   i, j,
					   pageset->pcp[j].count,
					   pageset->pcp[j].high,
					   pageset->pcp[j].batch);
			}
#ifdef CONFIG_NUMA
			seq_printf(m,
				   "\n            numa_foreign:   %lu"
				   "\n            interleave_hit: %lu"
				   "\n            other_node:     %lu",
				   pageset->numa_foreign,
				   pageset->interleave_hit,
				   pageset->other_node);
#endif
		}
		seq_printf(m,
			   "\n  all_unreclaimable: %u"
			   "\n  prev_priority:     %i"
			   "\n  temp_priority:     %i"
			   "\n  start_pfn:         %lu",
			   zone->all_unreclaimable,
			   zone->prev_priority,
			   zone->temp_priority,
			   zone->zone_start_pfn);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}
struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
	struct page_state *ps;
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(*ps), GFP_KERNEL);
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
	ps = (struct page_state *)(v + NR_VM_ZONE_STAT_ITEMS);
	get_full_page_state(ps);
	ps->pgpgin /= 2;		/* sectors -> kbytes */
	ps->pgpgout /= 2;

	return v + *pos;
}
static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}
static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}
static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}
struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};
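/*
 * Illustrative sketch (hypothetical name; the actual hook-up lives in the
 * /proc setup code, not in this file): the seq_operations above are exposed
 * as a /proc file through a file_operations whose open method calls
 * seq_open(), roughly:
 *
 *	static int vmstat_open(struct inode *inode, struct file *file)
 *	{
 *		return seq_open(file, &vmstat_op);
 *	}
 */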
#endif /* CONFIG_PROC_FS */