mm/page_cgroup.c
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/memory.h>
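/*
 * Initialize a single page_cgroup descriptor: clear its flags, detach it
 * from any memory cgroup and point it back at the struct page for @pfn.
 */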
static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
{
	pc->flags = 0;
	pc->mem_cgroup = NULL;
	pc->page = pfn_to_page(pfn);
}

static unsigned long total_usage;
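/*
 * Without SPARSEMEM, each node keeps one flat page_cgroup array covering
 * its whole pfn range, hung off the node's pglist_data.  With SPARSEMEM
 * (the #else branch below), the array is kept per memory section instead.
 */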
#if !defined(CONFIG_SPARSEMEM)

void __init pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}
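/*
 * Allocate one page_cgroup per page spanned by node @nid from bootmem and
 * hang the table off NODE_DATA(nid).  Returns -ENOMEM if the allocation
 * fails.
 */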
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		__init_page_cgroup(pc, start_pfn + index);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}
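/*
 * Boot-time setup for the !SPARSEMEM case: build the page_cgroup table for
 * every online node and panic if any allocation fails.
 */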
void __init page_cgroup_init(void)
{
	int nid, fail;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try cgroup_disable=memory option if you"
	" don't want\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup was failed.\n");
	printk(KERN_CRIT "please try cgroup_disable=memory boot option\n");
	panic("Out of memory");
}
#else /* CONFIG_FLAT_NODE_MEM_MAP */
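/*
 * SPARSEMEM: the per-section pointer is stored biased by the section's
 * first pfn (see init_section_page_cgroup() below), so indexing it with
 * the absolute pfn yields the page_cgroup entry directly.
 */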
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	return section->page_cgroup + pfn;
}
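/*
 * Allocate and initialize the page_cgroup table for the section that
 * contains @pfn.  kmalloc_node() is tried first, with vmalloc_node() as a
 * fallback.
 */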
int __meminit init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section;
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	section = __pfn_to_section(pfn);

	if (section->page_cgroup)
		return 0;

	nid = page_to_nid(pfn_to_page(pfn));

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = kmalloc_node(table_size, GFP_KERNEL, nid);
	if (!base)
		base = vmalloc_node(table_size, nid);

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	section = __pfn_to_section(pfn);
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
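/*
 * Memory hotplug support.  __free_page_cgroup() undoes
 * init_section_page_cgroup(): it recovers the table's base address from
 * the biased section pointer and releases it with vfree() or kfree(),
 * matching how it was allocated.
 */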
#ifdef CONFIG_MEMORY_HOTPLUG

void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	ms->page_cgroup = NULL;
	if (is_vmalloc_addr(base))
		vfree(base);
	else
		kfree(base);
}
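/*
 * Allocate page_cgroup tables for every present section in the range being
 * onlined; if any section fails, roll back the ones already set up.
 */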
int online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}
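/* Free the page_cgroup tables covering a range that has gone offline. */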
int offline_page_cgroup(unsigned long start_pfn,
		unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}
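/*
 * Hotplug notifier: tables are allocated when a memory range is about to
 * come online (MEM_GOING_ONLINE) and torn down again if onlining is
 * cancelled or the range goes offline.
 */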
static int page_cgroup_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
	ret = notifier_from_errno(ret);
	return ret;
}

#endif
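/*
 * Boot-time setup for the SPARSEMEM case: walk every present section up to
 * max_pfn, then register the memory hotplug notifier so sections added
 * later are covered as well.
 */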
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int fail = 0;

	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (fail) {
		printk(KERN_CRIT "try cgroup_disable=memory boot option\n");
		panic("Out of memory");
	} else {
		hotplug_memory_notifier(page_cgroup_callback, 0);
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try cgroup_disable=memory option if you don't"
	" want\n");
}
void __init pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif