#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
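
/*
 * Illustrative expansion: with every optional zone configured,
 * FOR_ALL_ZONES(PGALLOC) produces the items
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE
 *
 * so the per-zone event items always mirror the zone order in enum zone_type.
 */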

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGSCAN_ZONE_RECLAIM_FAILED,
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
		KSWAPD_SKIP_CONGESTION_WAIT,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_COMPACTION
		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
#endif
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
		NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);

#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif
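
/*
 * Illustrative use: all_vm_events() folds the per-cpu arrays into one
 * summary, e.g. in a hypothetical reporting function:
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	printk("pgpgin %lu\n", events[PGPGIN]);
 */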

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
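
/*
 * For illustration: because the FOR_ALL_ZONES() items are laid out in zone
 * order, item##_NORMAL - ZONE_NORMAL is the first per-zone item and adding
 * zone_idx(zone) selects the one matching the zone. For a ZONE_DMA zone,
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * adds the batch to PGALLOC_DMA (order being a hypothetical allocation
 * order held by the caller).
 */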

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
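
/*
 * Illustrative use: zone_page_state() is cheap but can lag behind by the
 * pending per-cpu deltas. A caller that cannot tolerate that error, e.g. a
 * hypothetical watermark check, pays for the snapshot instead:
 *
 *	if (zone_page_state_snapshot(zone, NR_FREE_PAGES) < min_wmark_pages(zone))
 *		wake_up_reclaim();
 *
 * (wake_up_reclaim() is a placeholder for whatever the caller does next.)
 */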

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
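
/*
 * Illustrative use: a per-node total is just the sum over that node's
 * configured zones, e.g.
 *
 *	unsigned long pages = node_page_state(numa_node_id(), NR_FILE_PAGES);
 */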

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl,_z) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
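
/*
 * Usage sketch (illustrative): the plain variants are safe from any
 * context, while the __ variants assume the caller already runs with
 * interrupts disabled:
 *
 *	mod_zone_page_state(zone, NR_FILE_DIRTY, -1);
 *	__inc_zone_page_state(page, NR_FILE_PAGES);
 */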

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */