#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
{
        pc->flags = 0;
        pc->mem_cgroup = NULL;
        pc->page = pfn_to_page(pfn);
        INIT_LIST_HEAD(&pc->lru);
}
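/*
 * Running total, in bytes, of all page_cgroup tables allocated so far;
 * reported by the page_cgroup_init() paths below.
 */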
static unsigned long total_usage;
#if !defined(CONFIG_SPARSEMEM)
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
        pgdat->node_page_cgroup = NULL;
}
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        unsigned long offset;
        struct page_cgroup *base;

        base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
        if (unlikely(!base))
                return NULL;

        offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
        return base + offset;
}
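/*
 * Flat memory model: allocate one contiguous page_cgroup table covering
 * every pfn spanned by the node, falling back to any node if the
 * node-local allocation fails.
 */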
static int __init alloc_node_page_cgroup(int nid)
{
        struct page_cgroup *base, *pc;
        unsigned long table_size;
        unsigned long start_pfn, nr_pages, index;
        struct page *page;
        unsigned int order;

        start_pfn = NODE_DATA(nid)->node_start_pfn;
        nr_pages = NODE_DATA(nid)->node_spanned_pages;

        if (!nr_pages)
                return 0;

        table_size = sizeof(struct page_cgroup) * nr_pages;
        order = get_order(table_size);
        page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
        if (!page)
                page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
        if (!page)
                return -ENOMEM;
        base = page_address(page);
        for (index = 0; index < nr_pages; index++) {
                pc = base + index;
                __init_page_cgroup(pc, start_pfn + index);
        }
        NODE_DATA(nid)->node_page_cgroup = base;
        total_usage += table_size;
        return 0;
}
void __init page_cgroup_init(void)
{
        int nid, fail;

        if (mem_cgroup_disabled())
                return;

        for_each_online_node(nid) {
                fail = alloc_node_page_cgroup(nid);
                if (fail)
                        goto fail;
        }
        printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
        printk(KERN_INFO "please try the cgroup_disable=memory option if you"
                " don't want memory cgroups\n");
        return;
fail:
        printk(KERN_CRIT "allocation of page_cgroup failed.\n");
        printk(KERN_CRIT "please try the cgroup_disable=memory boot option\n");
        panic("Out of memory");
}
#else /* CONFIG_FLAT_NODE_MEM_MAP */
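/*
 * Sparsemem: each mem_section caches a page_cgroup pointer that is
 * pre-biased by the section's start pfn (init_section_page_cgroup()
 * below stores base - pfn), so lookup can index it by absolute pfn.
 */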
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        struct mem_section *section = __pfn_to_section(pfn);

        return section->page_cgroup + pfn;
}
/* __alloc_bootmem...() is protected by !slab_is_available() */
static int __init_refok init_section_page_cgroup(unsigned long pfn)
{
        struct mem_section *section = __pfn_to_section(pfn);
        struct page_cgroup *base, *pc;
        unsigned long table_size;
        int nid, index;

        if (!section->page_cgroup) {
                nid = page_to_nid(pfn_to_page(pfn));
                table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
                if (slab_is_available()) {
                        base = kmalloc_node(table_size,
                                        GFP_KERNEL | __GFP_NOWARN, nid);
                        if (!base)
                                base = vmalloc_node(table_size, nid);
                } else {
                        base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
                                        table_size,
                                        PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
                }
        } else {
                /*
                 * We don't have to allocate page_cgroup again, but the
                 * address of the memmap may have changed, so we may have
                 * to initialize again.
                 */
                base = section->page_cgroup + pfn;
                table_size = 0;
                /* check whether the address of the memmap has changed */
                if (base->page == pfn_to_page(pfn))
                        return 0;
        }

        if (!base) {
                printk(KERN_ERR "page cgroup allocation failure\n");
                return -ENOMEM;
        }

        for (index = 0; index < PAGES_PER_SECTION; index++) {
                pc = base + index;
                __init_page_cgroup(pc, pfn + index);
        }

        section->page_cgroup = base - pfn;
        total_usage += table_size;
        return 0;
}
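/*
 * Memory hotplug support: allocate page_cgroup tables as memory blocks
 * come online and free them again when blocks go offline (or when
 * onlining fails partway through).
 */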
#ifdef CONFIG_MEMORY_HOTPLUG
void __free_page_cgroup(unsigned long pfn)
{
        struct mem_section *ms;
        struct page_cgroup *base;

        ms = __pfn_to_section(pfn);
        if (!ms || !ms->page_cgroup)
                return;
        base = ms->page_cgroup + pfn;
        if (is_vmalloc_addr(base)) {
                vfree(base);
                ms->page_cgroup = NULL;
        } else {
                struct page *page = virt_to_page(base);
                if (!PageReserved(page)) { /* Is bootmem ? */
                        kfree(base);
                        ms->page_cgroup = NULL;
                }
        }
}
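/*
 * Round the hot-added pfn range out to whole sections; sections whose
 * table already matches the memmap are left untouched by
 * init_section_page_cgroup().
 */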
int __meminit online_page_cgroup(unsigned long start_pfn,
                        unsigned long nr_pages,
                        int nid)
{
        unsigned long start, end, pfn;
        int fail = 0;

        start = start_pfn & ~(PAGES_PER_SECTION - 1);
        end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

        for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
                if (!pfn_present(pfn))
                        continue;
                fail = init_section_page_cgroup(pfn);
        }
        if (!fail)
                return 0;

        /* rollback */
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_cgroup(pfn);

        return -ENOMEM;
}
int __meminit offline_page_cgroup(unsigned long start_pfn,
                unsigned long nr_pages, int nid)
{
        unsigned long start, end, pfn;

        start = start_pfn & ~(PAGES_PER_SECTION - 1);
        end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_cgroup(pfn);
        return 0;
}
static int __meminit page_cgroup_callback(struct notifier_block *self,
                                unsigned long action, void *arg)
{
        struct memory_notify *mn = arg;
        int ret = 0;

        switch (action) {
        case MEM_GOING_ONLINE:
                ret = online_page_cgroup(mn->start_pfn,
                                mn->nr_pages, mn->status_change_nid);
                break;
        case MEM_OFFLINE:
                offline_page_cgroup(mn->start_pfn,
                                mn->nr_pages, mn->status_change_nid);
                break;
        case MEM_CANCEL_ONLINE:
        case MEM_GOING_OFFLINE:
                break;
        case MEM_ONLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }

        ret = notifier_from_errno(ret);
        return ret;
}

#endif /* CONFIG_MEMORY_HOTPLUG */
void __init page_cgroup_init(void)
{
        unsigned long pfn;
        int fail = 0;

        if (mem_cgroup_disabled())
                return;

        for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
                if (!pfn_present(pfn))
                        continue;
                fail = init_section_page_cgroup(pfn);
        }
        if (fail) {
                printk(KERN_CRIT "try cgroup_disable=memory boot option\n");
                panic("Out of memory");
        } else {
                hotplug_memory_notifier(page_cgroup_callback, 0);
        }
        printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
        printk(KERN_INFO "please try the cgroup_disable=memory option if you"
                " don't want memory cgroups\n");
}
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
        return;
}

#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
        struct page **map;
        unsigned long length;
};

struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
        unsigned short id;
};
#define SC_PER_PAGE     (PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK     (SC_PER_PAGE - 1)
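/*
 * Worked example, assuming 4KiB pages and the two-byte swap_cgroup above:
 * SC_PER_PAGE = 4096 / 2 = 2048, so the record for swap offset N lives
 * in map[N / 2048], at index (N & 2047) within that page.
 */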
/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge
 * against SwapCache. At swap_free(), this is accessed directly from swap.
 *
 * This means,
 *  - we have no race in "exchange" when we're accessed via SwapCache because
 *    SwapCache (and its swp_entry) is under lock.
 *  - when called via swap_free(), there is no user of this entry and no race.
 * Hence, we don't need a lock around "exchange".
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */
/*
 * allocate buffers for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
        struct page *page;
        struct swap_cgroup_ctrl *ctrl;
        unsigned long idx, max;

        if (!do_swap_account)
                return 0;
        ctrl = &swap_cgroup_ctrl[type];

        for (idx = 0; idx < ctrl->length; idx++) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!page)
                        goto not_enough_page;
                ctrl->map[idx] = page;
        }
        return 0;
not_enough_page:
        max = idx;
        for (idx = 0; idx < max; idx++)
                __free_page(ctrl->map[idx]);

        return -ENOMEM;
}
/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: css ID of the mem_cgroup to be recorded
 *
 * Returns the old value on success, 0 on failure.
 * (Of course, the old value can itself be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
        int type = swp_type(ent);
        unsigned long offset = swp_offset(ent);
        unsigned long idx = offset / SC_PER_PAGE;
        unsigned long pos = offset & SC_POS_MASK;
        struct swap_cgroup_ctrl *ctrl;
        struct page *mappage;
        struct swap_cgroup *sc;
        unsigned short old;

        if (!do_swap_account)
                return 0;

        ctrl = &swap_cgroup_ctrl[type];

        mappage = ctrl->map[idx];
        sc = page_address(mappage);
        sc += pos;
        old = sc->id;
        sc->id = id;

        return old;
}
/**
 * lookup_swap_cgroup - lookup mem_cgroup tied to a swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns the CSS ID of the mem_cgroup on success, 0 on failure
 * (0 is an invalid ID).
 */
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
        int type = swp_type(ent);
        unsigned long offset = swp_offset(ent);
        unsigned long idx = offset / SC_PER_PAGE;
        unsigned long pos = offset & SC_POS_MASK;
        struct swap_cgroup_ctrl *ctrl;
        struct page *mappage;
        struct swap_cgroup *sc;
        unsigned short ret;

        if (!do_swap_account)
                return 0;

        ctrl = &swap_cgroup_ctrl[type];
        mappage = ctrl->map[idx];
        sc = page_address(mappage);
        sc += pos;
        ret = sc->id;
        return ret;
}
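/*
 * swap_cgroup_swapon - allocate and prepare the swap_cgroup map at swapon.
 * @type: swap type (index into swap_cgroup_ctrl)
 * @max_pages: number of pages in the swap area
 */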
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
        void *array;
        unsigned long array_size;
        unsigned long length;
        struct swap_cgroup_ctrl *ctrl;

        if (!do_swap_account)
                return 0;

        length = ((max_pages/SC_PER_PAGE) + 1);
        array_size = length * sizeof(void *);

        array = vmalloc(array_size);
        if (!array)
                goto nomem;

        memset(array, 0, array_size);
        ctrl = &swap_cgroup_ctrl[type];
        mutex_lock(&swap_cgroup_mutex);
        ctrl->length = length;
        ctrl->map = array;
        if (swap_cgroup_prepare(type)) {
                /* memory shortage */
                ctrl->map = NULL;
                ctrl->length = 0;
                vfree(array);
                mutex_unlock(&swap_cgroup_mutex);
                goto nomem;
        }
        mutex_unlock(&swap_cgroup_mutex);

        return 0;
nomem:
        printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
        printk(KERN_INFO
                "swap_cgroup can be disabled by the noswapaccount boot option\n");
        return -ENOMEM;
}
void swap_cgroup_swapoff(int type)
{
        int i;
        struct swap_cgroup_ctrl *ctrl;

        if (!do_swap_account)
                return;

        mutex_lock(&swap_cgroup_mutex);
        ctrl = &swap_cgroup_ctrl[type];
        if (ctrl->map) {
                for (i = 0; i < ctrl->length; i++) {
                        struct page *page = ctrl->map[i];

                        if (page)
                                __free_page(page);
                }
                vfree(ctrl->map);
                ctrl->map = NULL;
                ctrl->length = 0;
        }
        mutex_unlock(&swap_cgroup_mutex);
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */