#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE

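/*
 * Illustration (this comment is not in the original header): with all zone
 * config options enabled, FOR_ALL_ZONES(PGALLOC) expands to the item list
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE
 *
 * so each use below defines one event counter per configured zone type, in
 * the same order as enum zone_type in mmzone.h. Disabled zones drop out of
 * the expansion entirely.
 */
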
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
		KSWAPD_SKIP_CONGESTION_WAIT,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
		NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
}

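/*
 * Usage sketch (illustrative; these calls are not in the original header):
 *
 *	count_vm_event(PGFREE);		// safe wherever preemption may occur
 *	__count_vm_events(PGPGIN, 8);	// caller already holds off preemption
 *
 * The __ variants map to __this_cpu_*() and rely on the caller to keep the
 * task on one cpu for the duration of the update; the plain variants pay
 * for that protection themselves.
 */
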
extern void all_vm_events(unsigned long *);

#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)

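/*
 * Example (illustrative; assumes a CONFIG_HIGHMEM kernel): the macro maps a
 * zone to the matching per-zone event item by index arithmetic, so for a
 * highmem zone
 *
 *	__count_zone_vm_events(PGREFILL, zone, 1)
 *
 * becomes __count_vm_events(PGREFILL_NORMAL - ZONE_NORMAL + ZONE_HIGHMEM, 1),
 * i.e. __count_vm_events(PGREFILL_HIGH, 1), because FOR_ALL_ZONES() lays the
 * items out in zone order.
 */
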
/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

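/*
 * Usage sketch (illustrative; these calls are not in the original header):
 *
 *	unsigned long free  = global_page_state(NR_FREE_PAGES);
 *	unsigned long zfree = zone_page_state(zone, NR_FREE_PAGES);
 *
 * Because per cpu differentials may not have been folded back yet, a raw
 * read can be transiently negative on SMP; both helpers clamp to zero
 * rather than return a huge unsigned value.
 */
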
extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

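/*
 * Example (illustrative): node_page_state(0, NR_FILE_PAGES) sums the page
 * cache pages across every configured zone of node 0. The #ifdef chain
 * keeps the sum to zones that are compiled in, so nothing is iterated at
 * run time.
 */
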
extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl,_z) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

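/*
 * Illustration (not in the original header): on a UP build the locked and
 * unlocked entry points collapse into one, so
 *
 *	inc_zone_page_state(page, NR_FILE_MAPPED);
 *
 * is literally __inc_zone_state(page_zone(page), NR_FILE_MAPPED), and the
 * atomic_long_inc() alone keeps the counters consistent against interrupts.
 */
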
static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

#endif /* _LINUX_VMSTAT_H */