/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
/*
 * Accumulate the page_state information across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
DEFINE_PER_CPU(struct page_state, page_states) = {0};
static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
{
	unsigned cpu;

	memset(ret, 0, nr * sizeof(unsigned long));
	cpus_and(*cpumask, *cpumask, cpu_online_map);

	for_each_cpu_mask(cpu, *cpumask) {
		unsigned long *in;
		unsigned long *out;
		unsigned off;
		unsigned next_cpu;

		in = (unsigned long *)&per_cpu(page_states, cpu);

		/* Prefetch the next cpu's data while summing this one. */
		next_cpu = next_cpu(cpu, *cpumask);
		if (likely(next_cpu < NR_CPUS))
			prefetch(&per_cpu(page_states, next_cpu));

		out = (unsigned long *)ret;
		for (off = 0; off < nr; off++)
			*out++ += *in++;
	}
}
void get_full_page_state(struct page_state *ret)
{
	cpumask_t mask = CPU_MASK_ALL;

	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
}
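
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * reader such as the /proc/vmstat code below takes a snapshot with
 *
 *	struct page_state ps;
 *
 *	get_full_page_state(&ps);
 *
 * and then consumes fields like ps.pgfault. The snapshot is only
 * approximate: other CPUs keep updating their page_states while the
 * sum is taken.
 */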
void __mod_page_state_offset(unsigned long offset, unsigned long delta)
{
	void *ptr;

	ptr = &__get_cpu_var(page_states);
	*(unsigned long *)(ptr + offset) += delta;
}
EXPORT_SYMBOL(__mod_page_state_offset);
void mod_page_state_offset(unsigned long offset, unsigned long delta)
{
	unsigned long flags;
	void *ptr;

	local_irq_save(flags);
	ptr = &__get_cpu_var(page_states);
	*(unsigned long *)(ptr + offset) += delta;
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_page_state_offset);
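
/*
 * Usage sketch (illustrative only): callers normally reach this through
 * the mod_page_state() family of macros, which compute the offset of a
 * struct page_state member, roughly:
 *
 *	mod_page_state_offset(offsetof(struct page_state, pgfault), 1);
 *
 * i.e. bump this cpu's pgfault count by one with interrupts disabled
 * around the read-modify-write.
 */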
void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat)
{
	struct zone *zones = pgdat->node_zones;
	int i;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		*active += zones[i].nr_active;
		*inactive += zones[i].nr_inactive;
		*free += zones[i].free_pages;
	}
}
void get_zone_counts(unsigned long *active,
		unsigned long *inactive, unsigned long *free)
{
	struct pglist_data *pgdat;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for_each_online_pgdat(pgdat) {
		unsigned long l, m, n;

		__get_zone_counts(&l, &m, &n, pgdat);
		*active += l;
		*inactive += m;
		*free += n;
	}
}
/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);
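
/*
 * Sketch of the two views (illustrative only): the same counter can be
 * read globally or per zone,
 *
 *	unsigned long total = global_page_state(NR_PAGETABLE);
 *	unsigned long here  = zone_page_state(zone, NR_PAGETABLE);
 *
 * where global_page_state() reads vm_stat[] and zone_page_state() reads
 * the per zone atomic. Both may lag the truth by up to STAT_THRESHOLD
 * per cpu because of the differentials kept in the per cpu pagesets.
 */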
#ifdef CONFIG_SMP

#define STAT_THRESHOLD 32

/*
 * Determine pointer to currently valid differential byte given a zone and
 * the item number.
 *
 * Preemption must be off
 */
static inline s8 *diff_pointer(struct zone *zone, enum zone_stat_item item)
{
	return &zone_pcp(zone, smp_processor_id())->vm_stat_diff[item];
}
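
/*
 * Worked example (illustrative only): with STAT_THRESHOLD == 32, a cpu
 * can make up to 32 consecutive increments that touch only its own s8
 * differential. The increment that pushes the delta past the threshold
 * folds the whole accumulated value into the zone and global counters
 * via zone_page_state_add() and resets the differential to 0, so a
 * reader is never off by more than STAT_THRESHOLD per cpu while the
 * atomic counters are touched only once per batch.
 */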
/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	s8 *p;
	long x;

	p = diff_pointer(zone, item);
	x = delta + *p;

	if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}

	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);
/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);
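
/*
 * Usage sketch (illustrative only, not part of the original file; kept
 * under #if 0): accounting a new page table page from process context,
 * where the interrupt state is unknown.
 */
#if 0
static void example_account_pagetable(struct page *page)
{
	/* irq state unknown: use the irq saving variant */
	mod_zone_page_state(page_zone(page), NR_PAGETABLE, 1);

	/*
	 * With interrupts already off, the cheaper form would do:
	 *	__mod_zone_page_state(page_zone(page), NR_PAGETABLE, 1);
	 */
}
#endif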
/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 *
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	s8 *p = diff_pointer(zone, item);

	(*p)++;

	if (unlikely(*p > STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
}
void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	struct zone *zone = page_zone(page);
	s8 *p = diff_pointer(zone, item);

	(*p)--;

	if (unlikely(*p < -STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
}
EXPORT_SYMBOL(__dec_zone_page_state);
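
/*
 * Usage sketch (illustrative only): the rmap code, for example, accounts
 * mapped file pages with these optimized variants:
 *
 *	__inc_zone_page_state(page, NR_FILE_MAPPED);	on mapping
 *	__dec_zone_page_state(page, NR_FILE_MAPPED);	on unmapping
 *
 * Taking a struct page instead of a struct zone lets the code generated
 * for page_zone(page) be merged into the accounting function, as the
 * comment above explains.
 */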
void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);
void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;
	s8 *p;

	zone = page_zone(page);
	local_irq_save(flags);
	p = diff_pointer(zone, item);

	(*p)--;

	if (unlikely(*p < -STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);
/*
 * Update the zone counters for one cpu.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	unsigned long flags;

	for_each_zone(zone) {
		struct per_cpu_pageset *pcp;

		pcp = zone_pcp(zone, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (pcp->vm_stat_diff[i]) {
				local_irq_save(flags);
				zone_page_state_add(pcp->vm_stat_diff[i],
					zone, i);
				pcp->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
			}
	}
}
static void __refresh_cpu_vm_stats(void *dummy)
{
	refresh_cpu_vm_stats(smp_processor_id());
}
/*
 * Consolidate all counters.
 *
 * Note that the result is less inaccurate but still inaccurate
 * if concurrent processes are allowed to run.
 */
void refresh_vm_stats(void)
{
	on_each_cpu(__refresh_cpu_vm_stats, NULL, 0, 1);
}
EXPORT_SYMBOL(refresh_vm_stats);
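
/*
 * Usage sketch (illustrative only): a caller that wants the most
 * accurate totals available would fold first,
 *
 *	refresh_vm_stats();
 *	total = global_page_state(NR_FILE_PAGES);
 *
 * on_each_cpu() runs the fold on every online cpu and waits for
 * completion (last argument == 1), but updates can still race in the
 * moment the fold returns.
 */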
#endif	/* CONFIG_SMP */

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z	    = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
	if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
	}
	if (z->zone_pgdat == NODE_DATA(numa_node_id()))
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
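
/*
 * Worked example (illustrative only): a task running on node 0 asks for
 * memory, node 0 is exhausted and the page comes from a zone on node 1.
 * Then zonelist->zones[0] (the preferred zone) is on node 0 and z is on
 * node 1, so we count NUMA_MISS on z, NUMA_FOREIGN on the preferred
 * zone, and NUMA_OTHER on z because the page is remote from the
 * executing cpu's node.
 */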
#endif	/* CONFIG_NUMA */

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}
static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}
static void frag_stop(struct seq_file *m, void *arg)
{
}
/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;
	int order;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
		for (order = 0; order < MAX_ORDER; ++order)
			seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}
struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};
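
/*
 * Example output (illustrative): each /proc/buddyinfo line printed by
 * frag_show() gives nr_free for every order of one zone, e.g.:
 *
 *	Node 0, zone   Normal    693    205     45     12      3 ...
 *
 * with MAX_ORDER columns, one per free_area.
 */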
static char *vmstat_text[] = {
	/* Zoned VM counters */
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_slab",
	"nr_page_table_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_unstable",
	"nr_bounce",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"interleave_hit",
	"numa_local",
	"numa_other",
#endif

	/* Event counters */
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	"pgalloc_high",
	"pgalloc_normal",
	"pgalloc_dma32",
	"pgalloc_dma",

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	"pgrefill_high",
	"pgrefill_normal",
	"pgrefill_dma32",
	"pgrefill_dma",

	"pgsteal_high",
	"pgsteal_normal",
	"pgsteal_dma32",
	"pgsteal_dma",

	"pgscan_kswapd_high",
	"pgscan_kswapd_normal",
	"pgscan_kswapd_dma32",
	"pgscan_kswapd_dma",

	"pgscan_direct_high",
	"pgscan_direct_normal",
	"pgscan_direct_dma32",
	"pgscan_direct_dma",

	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
};
/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
		int i;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
		seq_printf(m,
			   "\n  pages free     %lu"
			   "\n        min      %lu"
			   "\n        low      %lu"
			   "\n        high     %lu"
			   "\n        active   %lu"
			   "\n        inactive %lu"
			   "\n  scanned  %lu (a: %lu i: %lu)"
			   "\n        spanned  %lu"
			   "\n        present  %lu",
			   zone->free_pages,
			   zone->pages_min,
			   zone->pages_low,
			   zone->pages_high,
			   zone->nr_active,
			   zone->nr_inactive,
			   zone->pages_scanned,
			   zone->nr_scan_active, zone->nr_scan_inactive,
			   zone->spanned_pages,
			   zone->present_pages);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
					zone_page_state(zone, i));

		seq_printf(m,
			   "\n        protection: (%lu",
			   zone->lowmem_reserve[0]);
		for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
			seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
		seq_printf(m,
			   ")"
			   "\n  pagesets");
		for_each_online_cpu(i) {
			struct per_cpu_pageset *pageset;
			int j;

			pageset = zone_pcp(zone, i);
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				if (pageset->pcp[j].count)
					break;
			}
			if (j == ARRAY_SIZE(pageset->pcp))
				continue;
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				seq_printf(m,
					   "\n    cpu: %i pcp: %i"
					   "\n              count: %i"
					   "\n              high:  %i"
					   "\n              batch: %i",
					   i, j,
					   pageset->pcp[j].count,
					   pageset->pcp[j].high,
					   pageset->pcp[j].batch);
			}
		}
		seq_printf(m,
			   "\n  all_unreclaimable: %u"
			   "\n  prev_priority:     %i"
			   "\n  temp_priority:     %i"
			   "\n  start_pfn:         %lu",
			   zone->all_unreclaimable,
			   zone->prev_priority,
			   zone->temp_priority,
			   zone->zone_start_pfn);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}
struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
	struct page_state *ps;
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(*ps), GFP_KERNEL);
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
	ps = (struct page_state *)(v + NR_VM_ZONE_STAT_ITEMS);
	get_full_page_state(ps);
	ps->pgpgin /= 2;		/* sectors -> kbytes */
	ps->pgpgout /= 2;
	return v + *pos;
}
static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}
static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}
static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}
struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};
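
/*
 * Example output (illustrative): each /proc/vmstat line pairs an entry
 * of vmstat_text[] with the value snapshotted in vmstat_start(), e.g.:
 *
 *	nr_page_table_pages 1287
 *	pgfault 9454441
 *
 * The first NR_VM_ZONE_STAT_ITEMS values come from the zoned counters,
 * the remainder from the accumulated struct page_state.
 */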
#endif /* CONFIG_PROC_FS */