tomato.git: release/src-rt-6.x/linux/linux-2.6/include/linux/mm_types.h
blob d5bb1796e12b19a1dde4960c2f10d0064a8867b9
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct address_space;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 */
struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	atomic_t _count;		/* Usage count, see below. */
	union {
		atomic_t _mapcount;	/* Count of ptes mapped in mms,
					 * to show when page is mapped
					 * & limit reverse map searches.
					 */
		struct {	/* SLUB uses */
			short unsigned int inuse;
			short unsigned int offset;
		};
	};
	union {
	    struct {
		unsigned long private;		/* Mapping-private opaque data:
						 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
		struct address_space *mapping;	/* If low bit clear, points to
						 * inode address_space, or NULL.
						 * If page mapped as anonymous
						 * memory, low bit is set, and
						 * it points to anon_vma object:
						 * see PAGE_MAPPING_ANON below.
						 */
	    };
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	    spinlock_t ptl;
#endif
	    struct {			/* SLUB uses */
		void **lockless_freelist;
		struct kmem_cache *slab;	/* Pointer to slab */
	    };
	    struct {
		struct page *first_page;	/* Compound pages */
	    };
	};
	union {
		pgoff_t index;		/* Our offset within mapping. */
		void *freelist;		/* SLUB: freelist req. slab lock */
	};
	struct list_head lru;		/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 */
	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
};

#endif /* _LINUX_MM_TYPES_H */
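
The comment on the mapping field above describes a pointer-tagging trick: struct address_space pointers are word-aligned, so the low bit is free to flag anonymous pages, whose mapping instead points at an anon_vma (in this tree the PAGE_MAPPING_ANON test lives in include/linux/mm.h). The standalone sketch below only illustrates the technique; every demo_* identifier is invented for the example and is not kernel API.

/*
 * Userspace illustration of the low-bit tagging described for
 * page->mapping above. All demo_* identifiers are made up for this
 * sketch; they are not part of the kernel.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_MAPPING_ANON 1UL			/* low bit set => anonymous page */

struct demo_address_space { int dummy; };	/* stand-in for struct address_space */
struct demo_anon_vma { int dummy; };		/* stand-in for struct anon_vma */
struct demo_page { void *mapping; };		/* stand-in for struct page */

static int demo_page_anon(const struct demo_page *page)
{
	return ((uintptr_t)page->mapping & DEMO_MAPPING_ANON) != 0;
}

/* Returns the address_space for file-backed pages, NULL for anonymous ones. */
static struct demo_address_space *demo_page_mapping(const struct demo_page *page)
{
	if (demo_page_anon(page))
		return NULL;
	return page->mapping;
}

int main(void)
{
	struct demo_address_space inode_space;
	struct demo_anon_vma anon_vma;

	struct demo_page file_page = { .mapping = &inode_space };
	struct demo_page anon_page = {
		.mapping = (void *)((uintptr_t)&anon_vma | DEMO_MAPPING_ANON)
	};

	printf("file page: anon=%d mapping=%p\n",
	       demo_page_anon(&file_page), (void *)demo_page_mapping(&file_page));
	printf("anon page: anon=%d mapping=%p\n",
	       demo_page_anon(&anon_page), (void *)demo_page_mapping(&anon_page));
	return 0;
}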
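
Similarly, the WANT_PAGE_VIRTUAL comment notes that when every page of RAM has a permanent kernel mapping, the virtual address need not be stored in struct page at all, because it can be computed from the page frame number. The minimal sketch below shows that calculation under an invented DEMO_PAGE_OFFSET lowmem base and 4 KiB pages; it mirrors the idea behind lowmem page_address() rather than any exact function in this tree.

/*
 * Sketch of computing a lowmem page's kernel virtual address from its
 * page frame number instead of storing it (see the WANT_PAGE_VIRTUAL
 * comment above). DEMO_PAGE_OFFSET and the demo_* names are invented
 * for this illustration.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT   12U			/* 4 KiB pages */
#define DEMO_PAGE_OFFSET  0xC0000000UL		/* classic 32-bit lowmem base */

/* With all RAM mapped at DEMO_PAGE_OFFSET: virtual = base + (pfn << shift). */
static unsigned long demo_page_address(unsigned long pfn)
{
	return DEMO_PAGE_OFFSET + (pfn << DEMO_PAGE_SHIFT);
}

int main(void)
{
	printf("pfn 0x0    -> 0x%lx\n", demo_page_address(0x0));
	printf("pfn 0x1234 -> 0x%lx\n", demo_page_address(0x1234));
	return 0;
}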