memcg: fix calculation of active_ratio
[linux-2.6/kvm.git] / include/linux/memcontrol.h

/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg
 * doesn't allocate memory but reclaims memory from all available zones,
 * so the "where I want memory from" bits of gfp_mask have no meaning.
 * Any bits of that field would do, but having a rule avoids ambiguity:
 * a charge function's gfp_mask should be GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL
 * is sane.)
 */
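
/*
 * Example (a hedged sketch of a page-cache caller; GFP_RECLAIM_MASK is
 * an mm-internal mask selecting the reclaim-relevant gfp bits): the
 * caller strips its own gfp_mask down before charging:
 *
 *	error = mem_cgroup_cache_charge(page, current->mm,
 *					gfp_mask & GFP_RECLAIM_MASK);
 *	if (error)
 *		return error;
 */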

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge(struct mm_struct *mm,
		gfp_t gfp_mask, struct mem_cgroup **ptr);
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
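
/*
 * The swap-in charge is two-phase (a hedged sketch of a do_swap_page
 * style caller; the exact code differs): try reserves the charge up
 * front, commit makes it stick once the pte is installed, cancel rolls
 * it back on failure:
 *
 *	struct mem_cgroup *ptr = NULL;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto out;
 *	...lock the pte and map the page...
 *	mem_cgroup_commit_charge_swapin(page, ptr);
 *
 * and on the failure path:
 *
 *	mem_cgroup_cancel_charge_swapin(ptr);
 */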

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				  enum lru_list from, enum lru_list to);
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask);

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);
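
/*
 * mem_cgroup_isolate_pages() is the per-memcg counterpart of vmscan's
 * isolate_lru_pages(): when reclaim targets one cgroup, victim pages
 * come off the memcg's own LRU rather than the zone LRU (a hedged
 * sketch of the call site):
 *
 *	nr_taken = mem_cgroup_isolate_pages(nr_to_scan, &page_list,
 *					&nr_scanned, sc->order, mode,
 *					zone, sc->mem_cgroup,
 *					active, file);
 */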
extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;
	rcu_read_lock();
	mem = mem_cgroup_from_task((mm)->owner);
	rcu_read_unlock();
	return cgroup == mem;
}
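
/*
 * Typical use (a hedged sketch of an rmap walker): skip mms that do not
 * belong to the memcg currently under reclaim:
 *
 *	if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
 *		continue;
 */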

extern int
mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage);
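
/*
 * Page migration reuses the reserve/settle pattern (a hedged sketch of
 * an unmap_and_move-style caller): prepare pins a charge for the old
 * page's memcg, end moves it to whichever of oldpage/newpage is mapped
 * when migration finishes:
 *
 *	struct mem_cgroup *mem = NULL;
 *
 *	rc = mem_cgroup_prepare_migration(page, &mem);
 *	...copy the data and remap the ptes...
 *	mem_cgroup_end_migration(mem, page, newpage);
 */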

/*
 * For memory reclaim.
 */
extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem);

extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem);
extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
					int priority);
extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
					int priority);
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
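
/*
 * A hedged sketch of the reclaim side: as with the global
 * inactive_anon_is_low(), anon pages are only deactivated once this
 * memcg's inactive anon list falls below active / inactive_ratio:
 *
 *	if (lru == LRU_ACTIVE_ANON &&
 *	    mem_cgroup_inactive_anon_is_low(sc->mem_cgroup))
 *		shrink_active_list(nr_to_scan, zone, sc, priority, file);
 */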
unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
				       struct zone *zone,
				       enum lru_list lru);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}
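
/*
 * Every memcg hook is expected to bail out early when the controller is
 * compiled in but disabled at boot (e.g. cgroup_disable=memory); a
 * hedged sketch of the common prologue in mm/memcontrol.c:
 *
 *	if (mem_cgroup_disabled())
 *		return 0;
 */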

extern bool mem_cgroup_oom_called(struct task_struct *task);

#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge(struct mm_struct *mm,
			gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
					struct page *oldpage,
					struct page *newpage)
{
}

static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	return 0;
}

static inline int mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	return 0;
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
					int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
					int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline bool mem_cgroup_oom_called(struct task_struct *task)
{
	return false;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone,
			 enum lru_list lru)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#endif /* _LINUX_MEMCONTROL_H */