/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with a gfp_mask argument should be passed either
 * GFP_KERNEL or (gfp_mask & GFP_RECLAIM_MASK). In the current implementation,
 * memcg doesn't allocate memory itself but reclaims memory from all available
 * zones, so the "where do I want memory from" bits of gfp_mask have no
 * meaning. Any bits of that field would work, but having a rule avoids
 * ambiguity: set a charge function's gfp_mask to GFP_KERNEL or to
 * (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg does allocate memory in the future, GFP_KERNEL is the
 * sane choice.)
 */
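/*
 * Illustrative sketch of the rule above (caller-side; the "charge_failed"
 * label is hypothetical): a caller holding an arbitrary gfp_mask keeps only
 * the reclaim-related bits, while a caller with no mask of its own simply
 * passes GFP_KERNEL:
 *
 *	int err;
 *
 *	err = mem_cgroup_newpage_charge(page, mm,
 *					gfp_mask & GFP_RECLAIM_MASK);
 *	if (err)
 *		goto charge_failed;
 */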
extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
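/*
 * Illustrative sketch of the swap-in charge protocol (caller-side; the
 * "out" label and map_the_page() step are hypothetical): the charge is made
 * in two phases so it can be backed out if the page cannot be mapped:
 *
 *	struct mem_cgroup *ptr;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto out;
 *	if (map_the_page(page) < 0) {
 *		mem_cgroup_cancel_charge_swapin(ptr);
 *		goto out;
 *	}
 *	mem_cgroup_commit_charge_swapin(page, ptr);
 */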
extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				  enum lru_list from, enum lru_list to);
/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);
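/*
 * Illustrative sketch: bracketing a batch of frees with
 * mem_cgroup_uncharge_start()/end() lets memcg coalesce the per-page
 * uncharges into fewer res_counter updates ("pages" is a hypothetical
 * list owned by the caller):
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry(page, &pages, lru)
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */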
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask);
extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
/* Test whether @mm's owner task is a member of @cgroup. */
static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;
	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	rcu_read_unlock();
	return cgroup == mem;
}
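/*
 * Illustrative use (hypothetical caller): code that only wants to act on
 * pages owned by a given memcg can filter mappings with
 *
 *	if (!mm_match_cgroup(vma->vm_mm, mem))
 *		continue;
 *
 * The rcu_read_lock() above is what makes dereferencing mm->owner safe.
 */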
extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);
extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage, bool migration_ok);
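/*
 * Illustrative sketch of the migration protocol (caller-side; the
 * migrate_the_page() step is hypothetical): the charge is moved to the new
 * page only once migration is known to have succeeded, which is what the
 * migration_ok argument of mem_cgroup_end_migration() reports:
 *
 *	struct mem_cgroup *mem;
 *	int err;
 *
 *	err = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
 *	if (err)
 *		return err;
 *	err = migrate_the_page(page, newpage);
 *	mem_cgroup_end_migration(mem, page, newpage, err == 0);
 */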
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					struct zone *zone,
					enum lru_list lru);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif
static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}
void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);
static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
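/*
 * Illustrative use (hypothetical call sites): rmap code accounting a file
 * page's mapped state would pair these on map and unmap, e.g.
 *
 *	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	...
 *	mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *
 * keeping MEMCG_NR_FILE_MAPPED in step with the page's mapcount.
 */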
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif
#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}
static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}
static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}
static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return NULL;
}
static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **ptr, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}
static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	return 1;
}
static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, struct zone *zone,
			     enum lru_list lru)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}
static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head,
					       struct page *tail)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */
#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif
#endif /* _LINUX_MEMCONTROL_H */