/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR

extern void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p);
extern void mm_free_cgroup(struct mm_struct *mm);

#define page_reset_bad_cgroup(page)	((page)->page_cgroup = 0)

extern struct page_cgroup *page_get_page_cgroup(struct page *page);
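/*
 * Charging API: a page is charged against the mem_cgroup that owns the
 * faulting mm and uncharged when the page is finally released;
 * mem_cgroup_cache_charge() is the page-cache flavour. The snippet below
 * is only an illustrative sketch of how a caller pairs the two (not a
 * verbatim kernel call site, and error handling is elided):
 *
 *	if (mem_cgroup_charge(page, mm, GFP_KERNEL))
 *		goto fail;
 *	...
 *	mem_cgroup_uncharge_page(page);
 */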
extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_move_lists(struct page *page, bool active);
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active);
extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

#define mm_match_cgroup(mm, cgroup)	\
	((cgroup) == rcu_dereference((mm)->mem_cgroup))

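/*
 * mm_match_cgroup() tests whether @mm currently belongs to @cgroup by
 * comparing against the RCU-protected mm->mem_cgroup pointer, so callers
 * are expected to be inside rcu_read_lock(). Sketch of intended use
 * (illustrative only):
 *
 *	rcu_read_lock();
 *	match = mm_match_cgroup(task->mm, mem);
 *	rcu_read_unlock();
 */
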
extern int mem_cgroup_prepare_migration(struct page *page);
extern void mem_cgroup_end_migration(struct page *page);
extern void mem_cgroup_page_migration(struct page *page, struct page *newpage);

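/*
 * The migration hooks above bracket page migration: prepare_migration()
 * runs before a page is migrated, page_migration() transfers the charge
 * from the old page to the new one, and end_migration() cleans up
 * afterwards (rough summary; see mm/migrate.c for the exact sequencing).
 */
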
/*
 * For memory reclaim.
 */
extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem);

extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem);
extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
							int priority);
extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
							int priority);

extern long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
					struct zone *zone, int priority);
extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
					struct zone *zone, int priority);

#else /* CONFIG_CGROUP_MEM_RES_CTLR */
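/*
 * With the memory controller compiled out, the stubs below keep callers
 * free of #ifdefs: charges always succeed (return 0), lookups return
 * NULL, the cgroup-match helpers report a match, and the reclaim hooks
 * report nothing to account for.
 */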
static inline void mm_init_cgroup(struct mm_struct *mm,
					struct task_struct *p)
{
}

static inline void mm_free_cgroup(struct mm_struct *mm)
{
}

static inline void page_reset_bad_cgroup(struct page *page)
{
}

static inline struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return NULL;
}

static inline int mem_cgroup_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_move_lists(struct page *page, bool active)
{
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline int mem_cgroup_prepare_migration(struct page *page)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct page *page)
{
}

static inline void
mem_cgroup_page_migration(struct page *page, struct page *newpage)
{
}

static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	return 0;
}

static inline int mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	return 0;
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	return 0;
}

static inline long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	return 0;
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#endif /* _LINUX_MEMCONTROL_H */