include/linux/page_cgroup.h
#ifndef __LINUX_PAGE_CGROUP_H
#define __LINUX_PAGE_CGROUP_H

enum {
	/* flags for mem_cgroup */
	PCG_LOCK,  /* Lock for pc->mem_cgroup and following bits. */
	PCG_CACHE, /* charged as cache */
	PCG_USED, /* this object is in use. */
	PCG_MIGRATION, /* under page migration */
	/* flags for mem_cgroup and file and I/O status */
	PCG_MOVE_LOCK, /* For race between move_account v.s. following bits */
	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
	/* No lock in page_cgroup */
	PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
	__NR_PCG_FLAGS,
};

#ifndef __GENERATING_BOUNDS_H
#include <generated/bounds.h>
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#include <linux/bit_spinlock.h>

/*
 * Page Cgroup can be considered as an extended mem_map.
 * A page_cgroup is associated with every page descriptor and identifies
 * which cgroup the page belongs to.  All page cgroups are allocated at
 * boot or on memory hotplug, so the page cgroup for a pfn always exists.
 */
struct page_cgroup {
	unsigned long flags;
	struct mem_cgroup *mem_cgroup;
	struct list_head lru;		/* per cgroup LRU list */
};
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);

#ifdef CONFIG_SPARSEMEM
static inline void __init page_cgroup_init_flatmem(void)
{
}
extern void __init page_cgroup_init(void);
#else
void __init page_cgroup_init_flatmem(void);
static inline void __init page_cgroup_init(void)
{
}
#endif

struct page_cgroup *lookup_page_cgroup(struct page *page);
struct page *lookup_cgroup_page(struct page_cgroup *pc);
#define TESTPCGFLAG(uname, lname)			\
static inline int PageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_bit(PCG_##lname, &pc->flags); }

#define SETPCGFLAG(uname, lname)			\
static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
	{ set_bit(PCG_##lname, &pc->flags); }

#define CLEARPCGFLAG(uname, lname)			\
static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ clear_bit(PCG_##lname, &pc->flags); }

#define TESTCLEARPCGFLAG(uname, lname)			\
static inline int TestClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_and_clear_bit(PCG_##lname, &pc->flags); }
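
/*
 * Illustrative note (not part of the original header): each invocation of
 * the macros above stamps out one static inline accessor.  For example,
 * TESTPCGFLAG(Cache, CACHE) expands to:
 *
 *	static inline int PageCgroupCache(struct page_cgroup *pc)
 *	{ return test_bit(PCG_CACHE, &pc->flags); }
 *
 * so callers query and update the per-page_cgroup bits through the
 * PageCgroup*() / SetPageCgroup*() / ClearPageCgroup*() helpers instead of
 * poking pc->flags directly.
 */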
/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
CLEARPCGFLAG(Cache, CACHE)
SETPCGFLAG(Cache, CACHE)

TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)
SETPCGFLAG(Used, USED)

SETPCGFLAG(AcctLRU, ACCT_LRU)
CLEARPCGFLAG(AcctLRU, ACCT_LRU)
TESTPCGFLAG(AcctLRU, ACCT_LRU)
TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU)

SETPCGFLAG(FileMapped, FILE_MAPPED)
CLEARPCGFLAG(FileMapped, FILE_MAPPED)
TESTPCGFLAG(FileMapped, FILE_MAPPED)

SETPCGFLAG(Migration, MIGRATION)
CLEARPCGFLAG(Migration, MIGRATION)
TESTPCGFLAG(Migration, MIGRATION)
static inline void lock_page_cgroup(struct page_cgroup *pc)
{
	/*
	 * Don't take this lock in IRQ context.
	 * This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION.
	 */
	bit_spin_lock(PCG_LOCK, &pc->flags);
}

static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
	bit_spin_unlock(PCG_LOCK, &pc->flags);
}
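
/*
 * Usage sketch (illustrative, not part of the original header): callers
 * typically pin the USED bit and the pc->mem_cgroup binding like this:
 *
 *	lock_page_cgroup(pc);
 *	if (PageCgroupUsed(pc)) {
 *		struct mem_cgroup *memcg = pc->mem_cgroup;
 *		... update memcg state; the binding cannot change here ...
 *	}
 *	unlock_page_cgroup(pc);
 */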
static inline void move_lock_page_cgroup(struct page_cgroup *pc,
	unsigned long *flags)
{
	/*
	 * Updates to the stat-related bits in pc->flags can come from both
	 * normal context and IRQ context.  Disable IRQs to avoid deadlock.
	 */
	local_irq_save(*flags);
	bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
}

static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
	unsigned long *flags)
{
	bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
	local_irq_restore(*flags);
}
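
/*
 * Usage sketch (illustrative, not part of the original header): the move
 * lock takes a caller-provided flags word, mirroring local_irq_save():
 *
 *	unsigned long flags;
 *
 *	move_lock_page_cgroup(pc, &flags);
 *	SetPageCgroupFileMapped(pc);	... or other stat-related bits ...
 *	move_unlock_page_cgroup(pc, &flags);
 *
 * This keeps statistics updates from racing with move_account.
 */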
#ifdef CONFIG_SPARSEMEM
#define PCG_ARRAYID_WIDTH	SECTIONS_SHIFT
#else
#define PCG_ARRAYID_WIDTH	NODES_SHIFT
#endif

#if (PCG_ARRAYID_WIDTH > BITS_PER_LONG - NR_PCG_FLAGS)
#error Not enough space left in pc->flags to store page_cgroup array IDs
#endif

/* pc->flags: ARRAY-ID | FLAGS */

#define PCG_ARRAYID_MASK	((1UL << PCG_ARRAYID_WIDTH) - 1)

#define PCG_ARRAYID_OFFSET	(BITS_PER_LONG - PCG_ARRAYID_WIDTH)
/*
 * Zero the shift count for non-existent fields, to prevent compiler
 * warnings and ensure references are optimized away.
 */
#define PCG_ARRAYID_SHIFT	(PCG_ARRAYID_OFFSET * (PCG_ARRAYID_WIDTH != 0))
static inline void set_page_cgroup_array_id(struct page_cgroup *pc,
					    unsigned long id)
{
	pc->flags &= ~(PCG_ARRAYID_MASK << PCG_ARRAYID_SHIFT);
	pc->flags |= (id & PCG_ARRAYID_MASK) << PCG_ARRAYID_SHIFT;
}

static inline unsigned long page_cgroup_array_id(struct page_cgroup *pc)
{
	return (pc->flags >> PCG_ARRAYID_SHIFT) & PCG_ARRAYID_MASK;
}
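
/*
 * Worked example (illustrative; the width of 8 is an assumption, not taken
 * from the original source): on a 64-bit build with PCG_ARRAYID_WIDTH == 8,
 * PCG_ARRAYID_SHIFT is 56, so the array ID lives in bits 56..63 while the
 * PCG_* flag bits stay at the bottom of pc->flags:
 *
 *	set_page_cgroup_array_id(pc, 3);
 *	  pc->flags == (3UL << 56) | <flag bits>
 *	page_cgroup_array_id(pc) == 3
 *
 * lookup_cgroup_page() uses this ID to locate the section (SPARSEMEM) or
 * node (FLATMEM) array the page_cgroup was allocated from.
 */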
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct page_cgroup;

static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
}

static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	return NULL;
}

static inline void page_cgroup_init(void)
{
}

static inline void __init page_cgroup_init_flatmem(void)
{
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR */
#include <linux/swap.h>

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new);
extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
extern unsigned short lookup_swap_cgroup(swp_entry_t ent);
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);
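
/*
 * Orientation note (illustrative, not part of the original header):
 * swap_cgroup_record() associates a mem_cgroup ID with a swap entry and
 * returns the ID previously stored there; lookup_swap_cgroup() reads the
 * recorded ID back; swap_cgroup_cmpxchg() replaces the ID only if it still
 * equals the expected old value.  swap_cgroup_swapon() and
 * swap_cgroup_swapoff() allocate and free the per-swap-device map backing
 * these records.
 */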
#else
static inline
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	return 0;
}

static inline
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
	return 0;
}

static inline int
swap_cgroup_swapon(int type, unsigned long max_pages)
{
	return 0;
}

static inline void swap_cgroup_swapoff(int type)
{
	return;
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */
#endif /* !__GENERATING_BOUNDS_H */

#endif /* __LINUX_PAGE_CGROUP_H */