/*
 * include/linux/page-flags.h
 *
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/percpu.h>
#include <linux/cache.h>
#include <asm/pgtable.h>

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist (e.g. empty_bad_page)...
 *
 * The PG_private bitflag is set if page->private contains a valid value.
 *
 * During disk I/O, PG_locked is used. This bit is set before I/O and
 * reset when I/O completes. page_waitqueue(page) is a wait queue of all tasks
 * waiting for the I/O on this page to complete.
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * For choosing which pages to swap out, inode pages carry a PG_referenced bit,
 * which is set any time the system accesses that page through the (mapping,
 * index) hash table. This referenced bit, together with the referenced bit
 * in the page tables, is used to manipulate page->age and move the page across
 * the active, inactive_dirty and inactive_clean lists.
 *
 * Note that the referenced bit, the page->lru list_head and the active,
 * inactive_dirty and inactive_clean lists are protected by the
 * zone->lru_lock, and *NOT* by the usual PG_locked bit!
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture-specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual address
 * space; they need to be kmapped separately for doing I/O on the pages.  The
 * struct page (which holds these flag bits) is always mapped into kernel
 * address space...
 *
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *                (N-FLAGS_RESERVED)
 *
 * The fields area is reserved for fields mapping zone, node and SPARSEMEM
 * section.  The boundary between these two areas is defined by
 * FLAGS_RESERVED, which gives the width of the fields section
 * (see linux/mmzone.h).  New flags must _not_ overlap with this area.
 */
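
/*
 * Illustrative sketch, not part of the original header: the read-side
 * lifecycle described above (PG_locked held across the I/O, then either
 * PG_uptodate or PG_error set) is typically driven from a filesystem's
 * read-completion handler.  "my_read_end_io" is a hypothetical name, and
 * unlock_page() comes from linux/pagemap.h, not from this file:
 *
 *        static void my_read_end_io(struct page *page, int uptodate)
 *        {
 *                if (uptodate)
 *                        SetPageUptodate(page);
 *                else
 *                        SetPageError(page);
 *                unlock_page(page);
 *        }
 *
 * unlock_page() is used rather than ClearPageLocked() because it also
 * wakes any tasks sleeping on page_waitqueue(page).
 */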

#define PG_locked 0 /* Page is locked. Don't touch. */
#define PG_error 1
#define PG_referenced 2
#define PG_uptodate 3

#define PG_dirty 4
#define PG_lru 5
#define PG_active 6
#define PG_slab 7 /* slab debug (Suparna wants this) */

#define PG_checked 8 /* kill me in 2.5.<early>. */
#define PG_arch_1 9
#define PG_reserved 10
#define PG_private 11 /* Has something at ->private */

#define PG_writeback 12 /* Page is under writeback */
#define PG_nosave 13 /* Used for system suspend/resume */
#define PG_compound 14 /* Part of a compound page */
#define PG_swapcache 15 /* Swap page: swp_entry_t in private */

#define PG_mappedtodisk 16 /* Has blocks allocated on-disk */
#define PG_reclaim 17 /* To be reclaimed asap */
#define PG_nosave_free 18 /* Free, should not be written */
#define PG_buddy 19 /* Page is free, on buddy lists */

#define PG_uncached 20 /* Page has been mapped as uncached */

/*
 * Global page accounting.  One instance per CPU.  Only unsigned longs are
 * allowed.
 *
 * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
 *   any time safely (which protects the instance from modification by
 *   interrupt).
 * - The __xxx_page_state variants can be used safely when interrupts are
 *   disabled.
 * - The __xxx_page_state variants can be used if the field is only
 *   modified from process context and protected from preemption, or only
 *   modified from interrupt context.  In this case, the field should be
 *   commented here.
 */
struct page_state {
        unsigned long nr_dirty; /* Dirty writeable pages */
        unsigned long nr_writeback; /* Pages under writeback */
        unsigned long nr_unstable; /* NFS unstable pages */
        unsigned long nr_page_table_pages; /* Pages used for pagetables */
        unsigned long nr_mapped; /* mapped into pagetables.
                                  * only modified from process context */
        unsigned long nr_slab; /* In slab */
#define GET_PAGE_STATE_LAST nr_slab

        /*
         * The below are zeroed by get_page_state().  Use get_full_page_state()
         * to add up all these.
         */
        unsigned long pgpgin; /* Disk reads */
        unsigned long pgpgout; /* Disk writes */
        unsigned long pswpin; /* swap reads */
        unsigned long pswpout; /* swap writes */

        unsigned long pgalloc_high; /* page allocations */
        unsigned long pgalloc_normal;
        unsigned long pgalloc_dma32;
        unsigned long pgalloc_dma;

        unsigned long pgfree; /* page freeings */
        unsigned long pgactivate; /* pages moved inactive->active */
        unsigned long pgdeactivate; /* pages moved active->inactive */

        unsigned long pgfault; /* faults (major+minor) */
        unsigned long pgmajfault; /* faults (major only) */

        unsigned long pgrefill_high; /* inspected in refill_inactive_zone */
        unsigned long pgrefill_normal;
        unsigned long pgrefill_dma32;
        unsigned long pgrefill_dma;

        unsigned long pgsteal_high; /* total highmem pages reclaimed */
        unsigned long pgsteal_normal;
        unsigned long pgsteal_dma32;
        unsigned long pgsteal_dma;

        unsigned long pgscan_kswapd_high; /* total highmem pages scanned */
        unsigned long pgscan_kswapd_normal;
        unsigned long pgscan_kswapd_dma32;
        unsigned long pgscan_kswapd_dma;

        unsigned long pgscan_direct_high; /* total highmem pages scanned */
        unsigned long pgscan_direct_normal;
        unsigned long pgscan_direct_dma32;
        unsigned long pgscan_direct_dma;

        unsigned long pginodesteal; /* pages reclaimed via inode freeing */
        unsigned long slabs_scanned; /* slab objects scanned */
        unsigned long kswapd_steal; /* pages reclaimed by kswapd */
        unsigned long kswapd_inodesteal; /* reclaimed via kswapd inode freeing */
        unsigned long pageoutrun; /* kswapd's calls to page reclaim */
        unsigned long allocstall; /* direct reclaim calls */

        unsigned long pgrotated; /* pages rotated to tail of the LRU */
        unsigned long nr_bounce; /* pages for bounce buffers */
};

extern void get_page_state(struct page_state *ret);
extern void get_page_state_node(struct page_state *ret, int node);
extern void get_full_page_state(struct page_state *ret);
extern unsigned long read_page_state_offset(unsigned long offset);
extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);

#define read_page_state(member) \
        read_page_state_offset(offsetof(struct page_state, member))

#define mod_page_state(member, delta) \
        mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define __mod_page_state(member, delta) \
        __mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define inc_page_state(member) mod_page_state(member, 1UL)
#define dec_page_state(member) mod_page_state(member, 0UL - 1)
#define add_page_state(member,delta) mod_page_state(member, (delta))
#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))

#define __inc_page_state(member) __mod_page_state(member, 1UL)
#define __dec_page_state(member) __mod_page_state(member, 0UL - 1)
#define __add_page_state(member,delta) __mod_page_state(member, (delta))
#define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta))

#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
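
/*
 * Illustrative sketch, not part of the original header: per the locking
 * rules documented above, plain process-context code uses the
 * interrupt-safe accessors, while code that already runs with interrupts
 * disabled can use the cheaper __ variants.  The helper names below are
 * hypothetical; only the *_page_state macros come from this file:
 *
 *        static void my_account_fault(void)
 *        {
 *                inc_page_state(pgfault);
 *        }
 *
 *        static void my_account_frees(unsigned long nr)
 *        {
 *                __add_page_state(pgfree, nr);
 *        }
 *
 * my_account_fault() may be called from any context, since
 * inc_page_state() protects against interrupts itself; my_account_frees()
 * is only safe if the caller has already disabled local interrupts (or if
 * the field is never touched from interrupt context).
 */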

#define state_zone_offset(zone, member) \
({ \
        unsigned offset; \
        if (is_highmem(zone)) \
                offset = offsetof(struct page_state, member##_high); \
        else if (is_normal(zone)) \
                offset = offsetof(struct page_state, member##_normal); \
        else if (is_dma32(zone)) \
                offset = offsetof(struct page_state, member##_dma32); \
        else \
                offset = offsetof(struct page_state, member##_dma); \
        offset; \
})

#define __mod_page_state_zone(zone, member, delta) \
do { \
        __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)

#define mod_page_state_zone(zone, member, delta) \
do { \
        mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)
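
/*
 * Illustrative sketch, not part of the original header: the _zone
 * variants pick the per-zone member (member##_dma, _dma32, _normal or
 * _high) based on the zone being operated on.  Page reclaim might
 * account a scanning pass roughly like this, where "zone", "nr_scanned"
 * and "nr_deactivated" are locals of a hypothetical caller, and
 * local_irq_disable()/local_irq_enable() come from the interrupt API,
 * not from this file:
 *
 *        local_irq_disable();
 *        __mod_page_state_zone(zone, pgrefill, nr_scanned);
 *        __mod_page_state(pgdeactivate, nr_deactivated);
 *        local_irq_enable();
 */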

/*
 * Manipulation of page state flags
 */
#define PageLocked(page) \
        test_bit(PG_locked, &(page)->flags)
#define SetPageLocked(page) \
        set_bit(PG_locked, &(page)->flags)
#define TestSetPageLocked(page) \
        test_and_set_bit(PG_locked, &(page)->flags)
#define ClearPageLocked(page) \
        clear_bit(PG_locked, &(page)->flags)
#define TestClearPageLocked(page) \
        test_and_clear_bit(PG_locked, &(page)->flags)
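
/*
 * Illustrative sketch, not part of the original header: PG_locked is
 * normally taken and released through lock_page()/unlock_page() from
 * linux/pagemap.h, which sleep and wake waiters as needed.  The TestSet
 * form is what a non-blocking "trylock" looks like:
 *
 *        if (TestSetPageLocked(page)) {
 *                (someone else holds the page lock: back off, or sleep
 *                 in lock_page() until it is released)
 *        } else {
 *                (we now own PG_locked; release it with unlock_page(),
 *                 not ClearPageLocked(), so that waiters are woken)
 *        }
 */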

#define PageError(page) test_bit(PG_error, &(page)->flags)
#define SetPageError(page) set_bit(PG_error, &(page)->flags)
#define ClearPageError(page) clear_bit(PG_error, &(page)->flags)

#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags)
#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)

#define PageUptodate(page) test_bit(PG_uptodate, &(page)->flags)
#ifndef SetPageUptodate
#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
#endif
#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)

#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
#define TestSetPageDirty(page) test_and_set_bit(PG_dirty, &(page)->flags)
#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags)
#define __ClearPageDirty(page) __clear_bit(PG_dirty, &(page)->flags)
#define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)

#define PageLRU(page) test_bit(PG_lru, &(page)->flags)
#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags)
#define ClearPageLRU(page) clear_bit(PG_lru, &(page)->flags)
#define __ClearPageLRU(page) __clear_bit(PG_lru, &(page)->flags)

#define PageActive(page) test_bit(PG_active, &(page)->flags)
#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
#define __ClearPageActive(page) __clear_bit(PG_active, &(page)->flags)

#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
#define __SetPageSlab(page) __set_bit(PG_slab, &(page)->flags)
#define __ClearPageSlab(page) __clear_bit(PG_slab, &(page)->flags)

#ifdef CONFIG_HIGHMEM
#define PageHighMem(page) is_highmem(page_zone(page))
#else
#define PageHighMem(page) 0 /* needed to optimize away at compile time */
#endif
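
/*
 * Illustrative sketch, not part of the original header: PageHighMem()
 * tells you whether page_address() is valid for a page or whether a
 * temporary mapping has to be set up first.  kmap()/kunmap() and
 * page_address() come from linux/highmem.h and linux/mm.h, not from
 * this file:
 *
 *        void *kaddr;
 *
 *        if (PageHighMem(page))
 *                kaddr = kmap(page);
 *        else
 *                kaddr = page_address(page);
 *        (... touch the page contents through kaddr ...)
 *        if (PageHighMem(page))
 *                kunmap(page);
 *
 * On !CONFIG_HIGHMEM kernels PageHighMem() is the constant 0, so the
 * highmem branches above are optimized away at compile time.
 */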

#define PageChecked(page) test_bit(PG_checked, &(page)->flags)
#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)

#define PageReserved(page) test_bit(PG_reserved, &(page)->flags)
#define SetPageReserved(page) set_bit(PG_reserved, &(page)->flags)
#define ClearPageReserved(page) clear_bit(PG_reserved, &(page)->flags)
#define __ClearPageReserved(page) __clear_bit(PG_reserved, &(page)->flags)

#define SetPagePrivate(page) set_bit(PG_private, &(page)->flags)
#define ClearPagePrivate(page) clear_bit(PG_private, &(page)->flags)
#define PagePrivate(page) test_bit(PG_private, &(page)->flags)
#define __SetPagePrivate(page) __set_bit(PG_private, &(page)->flags)
#define __ClearPagePrivate(page) __clear_bit(PG_private, &(page)->flags)

#define PageWriteback(page) test_bit(PG_writeback, &(page)->flags)
#define SetPageWriteback(page) \
        do { \
                if (!test_and_set_bit(PG_writeback, \
                                &(page)->flags)) \
                        inc_page_state(nr_writeback); \
        } while (0)
#define TestSetPageWriteback(page) \
        ({ \
                int ret; \
                ret = test_and_set_bit(PG_writeback, \
                                        &(page)->flags); \
                if (!ret) \
                        inc_page_state(nr_writeback); \
                ret; \
        })
#define ClearPageWriteback(page) \
        do { \
                if (test_and_clear_bit(PG_writeback, \
                                &(page)->flags)) \
                        dec_page_state(nr_writeback); \
        } while (0)
#define TestClearPageWriteback(page) \
        ({ \
                int ret; \
                ret = test_and_clear_bit(PG_writeback, \
                                &(page)->flags); \
                if (ret) \
                        dec_page_state(nr_writeback); \
                ret; \
        })
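
/*
 * Illustrative sketch, not part of the original header: the writeback
 * helpers above fold the nr_writeback accounting into the flag change,
 * so the counter cannot drift from the flag; that is why callers go
 * through them rather than using the raw bitops on PG_writeback.  A
 * typical pairing looks like:
 *
 *        if (!TestSetPageWriteback(page)) {
 *                (page has just entered writeback; nr_writeback was bumped)
 *        }
 *        ...
 *        if (TestClearPageWriteback(page)) {
 *                (writeback just finished; nr_writeback was decremented)
 *        }
 *
 * In practice most code uses the higher-level set_page_writeback() and
 * end_page_writeback() interfaces, which also keep the mapping's
 * radix-tree writeback tag in sync.
 */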

#define PageNosave(page) test_bit(PG_nosave, &(page)->flags)
#define SetPageNosave(page) set_bit(PG_nosave, &(page)->flags)
#define TestSetPageNosave(page) test_and_set_bit(PG_nosave, &(page)->flags)
#define ClearPageNosave(page) clear_bit(PG_nosave, &(page)->flags)
#define TestClearPageNosave(page) test_and_clear_bit(PG_nosave, &(page)->flags)

#define PageNosaveFree(page) test_bit(PG_nosave_free, &(page)->flags)
#define SetPageNosaveFree(page) set_bit(PG_nosave_free, &(page)->flags)
#define ClearPageNosaveFree(page) clear_bit(PG_nosave_free, &(page)->flags)

#define PageBuddy(page) test_bit(PG_buddy, &(page)->flags)
#define __SetPageBuddy(page) __set_bit(PG_buddy, &(page)->flags)
#define __ClearPageBuddy(page) __clear_bit(PG_buddy, &(page)->flags)

#define PageMappedToDisk(page) test_bit(PG_mappedtodisk, &(page)->flags)
#define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
#define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)

#define PageReclaim(page) test_bit(PG_reclaim, &(page)->flags)
#define SetPageReclaim(page) set_bit(PG_reclaim, &(page)->flags)
#define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags)
#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)

#define PageCompound(page) test_bit(PG_compound, &(page)->flags)
#define __SetPageCompound(page) __set_bit(PG_compound, &(page)->flags)
#define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags)

#ifdef CONFIG_SWAP
#define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags)
#define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags)
#define ClearPageSwapCache(page) clear_bit(PG_swapcache, &(page)->flags)
#else
#define PageSwapCache(page) 0
#endif

#define PageUncached(page) test_bit(PG_uncached, &(page)->flags)
#define SetPageUncached(page) set_bit(PG_uncached, &(page)->flags)
#define ClearPageUncached(page) clear_bit(PG_uncached, &(page)->flags)

struct page; /* forward declaration */

int test_clear_page_dirty(struct page *page);
int test_clear_page_writeback(struct page *page);
int test_set_page_writeback(struct page *page);

static inline void clear_page_dirty(struct page *page)
{
        test_clear_page_dirty(page);
}

static inline void set_page_writeback(struct page *page)
{
        test_set_page_writeback(page);
}
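
/*
 * Illustrative sketch, not part of the original header: these wrappers
 * are for callers that only care about the side effect, not about the
 * old value.  A much-simplified ->writepage()-style sequence might look
 * like this ("my_writepage" is a hypothetical name; BUG_ON() and
 * unlock_page() come from other headers, and the page must already be
 * locked by the caller):
 *
 *        static int my_writepage(struct page *page)
 *        {
 *                BUG_ON(!PageLocked(page));
 *                clear_page_dirty(page);
 *                set_page_writeback(page);
 *                unlock_page(page);
 *                (submit the I/O here; the completion handler ends
 *                 writeback, clearing PG_writeback and waking waiters)
 *                return 0;
 *        }
 */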

#endif /* PAGE_FLAGS_H */