#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
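
/*
 * Allocation and lookup of the per-page "struct page_cgroup" arrays used by
 * the memory cgroup controller, plus the swap_cgroup records that remember
 * which cgroup a swapped-out page was charged to.
 */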
static void __meminit init_page_cgroup(struct page_cgroup *pc, unsigned long id)
{
	pc->flags = 0;
	set_page_cgroup_array_id(pc, id);
	pc->mem_cgroup = NULL;
	INIT_LIST_HEAD(&pc->lru);
}
static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}
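
/*
 * With flat memory, page_cgroup entries live in one contiguous array per
 * node, indexed by the pfn offset from the node's first pfn.
 */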
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}
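
/*
 * Reverse mapping: recover the struct page for a page_cgroup from the node
 * id stored in the entry and its offset within that node's array.
 */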
struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
	unsigned long pfn;
	struct page *page;
	pg_data_t *pgdat;

	pgdat = NODE_DATA(page_cgroup_array_id(pc));
	pfn = pc - pgdat->node_page_cgroup + pgdat->node_start_pfn;
	page = pfn_to_page(pfn);
	VM_BUG_ON(pc != lookup_page_cgroup(page));
	return page;
}
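
/*
 * Allocate one page_cgroup for every page spanned by the node, from bootmem,
 * and initialize each entry with the owning node id.
 */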
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		init_page_cgroup(pc, nid);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}
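
/*
 * Early init for !SPARSEMEM kernels: walk every online node and build its
 * page_cgroup array, panicking if bootmem cannot satisfy the allocation.
 */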
void __init page_cgroup_init_flatmem(void)
{
	int nid, fail;

	if (mem_cgroup_disabled())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
	" don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

#else /* CONFIG_FLAT_NODE_MEM_MAP */
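
/*
 * With SPARSEMEM, each memory section carries its own page_cgroup pointer,
 * stored pre-biased by the section's start pfn so that indexing by pfn
 * works directly.
 */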
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	if (!section->page_cgroup)
		return NULL;
	return section->page_cgroup + pfn;
}

struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
	struct mem_section *section;
	struct page *page;
	unsigned long nr;

	nr = page_cgroup_array_id(pc);
	section = __nr_to_section(nr);
	page = pfn_to_page(pc - section->page_cgroup);
	VM_BUG_ON(pc != lookup_page_cgroup(page));
	return page;
}
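
/*
 * Try a node-local physically contiguous allocation first; fall back to
 * vmalloc (node-local when the node has memory) if that fails.
 */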
static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN);
	if (addr)
		return addr;

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vmalloc_node(size, nid);
	else
		addr = vmalloc(size);

	return addr;
}

#ifdef CONFIG_MEMORY_HOTPLUG
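/* Release a section's page_cgroup table with whichever allocator produced it. */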
static void free_page_cgroup(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size =
			sizeof(struct page_cgroup) * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		free_pages_exact(addr, table_size);
	}
}
#endif
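
/*
 * Allocate and wire up the page_cgroup table for the section containing
 * "pfn"; a no-op if the section is already populated.
 */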
static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
	struct page_cgroup *base, *pc;
	struct mem_section *section;
	unsigned long table_size;
	unsigned long nr;
	int index;

	nr = pfn_to_section_nr(pfn);
	section = __nr_to_section(nr);

	if (section->page_cgroup)
		return 0;

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = alloc_page_cgroup(table_size, nid);

	/*
	 * The value stored in section->page_cgroup is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		init_page_cgroup(pc, nr);
	}
	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	free_page_cgroup(base);
	ms->page_cgroup = NULL;
}
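
/*
 * Memory hotplug: populate page_cgroup tables for every present section in
 * the onlined range, rolling back on failure.
 */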
int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	if (nid == -1) {
		/*
		 * In this case, "nid" already exists and contains valid memory.
		 * "start_pfn" passed to us is a pfn which is an arg for
		 * online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}

int __meminit offline_page_cgroup(unsigned long start_pfn,
		unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}
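
/*
 * Hotplug notifier: build tables before a block goes online, free them
 * once it has gone offline.
 */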
static int __meminit page_cgroup_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;
	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif
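
/*
 * Early init for SPARSEMEM kernels: populate page_cgroup tables for every
 * valid section on every node with memory, then register for hotplug events.
 */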
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out of node pages are not initialized. So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn;
		     pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can be overlapping.
			 * We know some arch can have a nodes layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2|....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_cgroup_callback, 0);
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
			 "don't want memory cgroups\n");
	return;
oom:
	printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
	spinlock_t	lock;
};

struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
	unsigned short		id;
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK	(SC_PER_PAGE - 1)
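
/*
 * The per-swapfile map is a two-level table: an array of page pointers,
 * each page holding SC_PER_PAGE fixed-size swap_cgroup records. A swap
 * offset splits into a page index (offset / SC_PER_PAGE) and a position
 * within that page (offset & SC_POS_MASK).
 */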

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge
 * against SwapCache. At swap_free(), this is accessed directly from swap.
 *
 * This means,
 *  - we have no race in "exchange" when we're accessed via SwapCache because
 *    SwapCache(and its swp_entry) is under lock.
 *  - When called via swap_free(), there is no user of this entry and no race.
 * Then, we don't need lock around "exchange".
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */

/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns old id at success, 0 at failure.
 * (There is no mem_cgroup using 0 as its id)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned long flags;
	unsigned short retval;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	spin_lock_irqsave(&ctrl->lock, flags);
	retval = sc->id;
	if (retval == old)
		sc->id = new;
	else
		retval = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	return retval;
}

/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: mem_cgroup's css id to be recorded
 *
 * Returns old value at success, 0 at failure.
 * (Of course, old value can be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short old;
	unsigned long flags;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	spin_lock_irqsave(&ctrl->lock, flags);
	old = sc->id;
	sc->id = id;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
}

/**
 * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns CSS ID of mem_cgroup at success. 0 at failure. (0 is invalid ID)
 */
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short ret;

	ctrl = &swap_cgroup_ctrl[type];
	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	ret = sc->id;
	return ret;
}
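
/*
 * swapon hook: size and allocate the two-level map for this swap type so a
 * css id can be recorded for every slot of the device.
 */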
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
	array_size = length * sizeof(void *);

	array = vmalloc(array_size);
	if (!array)
		goto nomem;

	memset(array, 0, array_size);
	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		mutex_unlock(&swap_cgroup_mutex);
		vfree(array);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
	printk(KERN_INFO
		"swap_cgroup can be disabled by noswapaccount boot option\n");
	return -ENOMEM;
}

void swap_cgroup_swapoff(int type)
{
	struct page **map;
	unsigned long i, length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	map = ctrl->map;
	length = ctrl->length;
	ctrl->map = NULL;
	ctrl->length = 0;
	mutex_unlock(&swap_cgroup_mutex);

	if (map) {
		for (i = 0; i < length; i++) {
			struct page *page = map[i];
			if (page)
				__free_page(page);
		}
		vfree(map);
	}
}

#endif