mm: page allocator: calculate a better estimate of NR_FREE_PAGES when memory is low...
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / include/linux/vmstat.h
blob 01dd4acc7ba60ef4ea6c0239cbe574290ab533aa
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
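/*
 * For illustration: on a build with CONFIG_ZONE_DMA and CONFIG_HIGHMEM
 * set but CONFIG_ZONE_DMA32 unset,
 *
 *	FOR_ALL_ZONES(PGALLOC)
 *
 * expands to
 *
 *	PGALLOC_DMA, PGALLOC_NORMAL , PGALLOC_HIGH , PGALLOC_MOVABLE
 *
 * i.e. one item per configured zone, in enum zone_type order; the
 * arithmetic in __count_zone_vm_events() below depends on this layout.
 */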
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
		KSWAPD_SKIP_CONGESTION_WAIT,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
		NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;
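/*
 * Background note: sysctl_stat_interval (vm.stat_interval) controls how
 * often the per-cpu counter differentials are folded back into the
 * global counters; it is defined in mm/vmstat.c and defaults to HZ,
 * i.e. once per second.
 */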
#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
}
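/*
 * Usage note: the double-underscore variants rely on the caller to keep
 * the task on one cpu (interrupts or preemption already disabled); the
 * plain variants are safe from any context. Illustrative call sites,
 * modelled on the fault and free paths:
 *
 *	count_vm_event(PGFAULT);
 *	__count_vm_events(PGFREE, 1 << order);	 (irqs already off there)
 */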
extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */
#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
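/*
 * For illustration: __count_zone_vm_events(PGALLOC, zone, 1 << order)
 * expands to
 *
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zone_idx(zone),
 *			  1 << order);
 *
 * PGALLOC_NORMAL - ZONE_NORMAL yields the event index of the first
 * per-zone PGALLOC counter, so adding zone_idx(zone) selects the
 * counter belonging to the zone being operated on.
 */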
/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
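/*
 * In both readers above, the CONFIG_SMP clamp exists because per-cpu
 * deltas that have not yet been folded into the atomic counters can
 * make the sum transiently negative; returning that as an unsigned
 * value would look like an absurdly large count.
 */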
/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += zone_pcp(zone, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
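/*
 * Rough bound, for intuition: each online cpu can hold up to its
 * per-cpu stat_threshold worth of unfolded delta per counter, so the
 * cheap readers above can drift by about
 * num_online_cpus() * stat_threshold pages. That drift is what this
 * patch works around by using the snapshot for NR_FREE_PAGES when a
 * zone's free count sits close to its watermarks.
 */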
extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl,_z) do { } while (0)

#endif /* CONFIG_NUMA */
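/*
 * Illustrative use (e.g. the per-node sysfs statistics): report a
 * node-wide total without open-coding the zone walk:
 *
 *	unsigned long free = node_page_state(nid, NR_FREE_PAGES);
 *
 * On !CONFIG_NUMA kernels the macro above makes the same call site
 * read the global counter instead.
 */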
#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);
#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif
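/*
 * Either branch presents the same API. A sketch of typical updater use,
 * modelled on the page allocator and mlock paths (interrupts already
 * disabled there, so the double-underscore forms suffice):
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 *	__inc_zone_page_state(page, NR_MLOCK);
 */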
#endif /* _LINUX_VMSTAT_H */