per-zone and reclaim enhancements for memory controller: nid/zid helper function...
[linux-2.6/linux-2.6-openrd.git] / mm / memcontrol.c
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys;
static const int MEM_CGROUP_RECLAIM_RETRIES = 5;
/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CGROUP_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,	/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	/* # of pages charged as rss */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};
/*
 * For accounting under irq disable, no need for increment preempt count.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	int cpu = smp_processor_id();
	stat->cpustat[cpu].count[idx] += val;
}
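
/*
 * Read side: sum the per-cpu counters. This walks every possible CPU,
 * so it is only used on the (slow) statistics reporting path.
 */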
static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 * TODO: Consider making these lists per zone
	 */
	struct list_head active_list;
	struct list_head inactive_list;
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t lru_lock;
	unsigned long control_type;	/* control RSS or RSS+Pagecache */
	/*
	 * statistics.
	 */
	struct mem_cgroup_stat stat;
};
/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock. We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin).
 */
#define PAGE_CGROUP_LOCK_BIT	0x0
#define PAGE_CGROUP_LOCK	(1 << PAGE_CGROUP_LOCK_BIT)
/*
 * A page_cgroup is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup.
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	atomic_t ref_cnt;		/* Helpful when pages move b/w  */
					/* mapped and cached states     */
	int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE	(0x2)	/* page is active in this cgroup */
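
/*
 * nid/zid helpers: map a charged page back to its node id and zone index.
 * Within this file they are thin wrappers around page_to_nid() and
 * page_zonenum(); they are introduced for the per-zone and reclaim
 * enhancements named in the commit subject above.
 */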
static inline int page_cgroup_nid(struct page_cgroup *pc)
{
	return page_to_nid(pc->page);
}

static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
	return page_zonenum(pc->page);
}
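
/*
 * Accounting modes selectable through the control_type file (see
 * mem_control_type_write() below). Only MEM_CGROUP_TYPE_ALL causes page
 * cache to be charged in addition to RSS (see mem_cgroup_cache_charge()).
 */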
enum {
	MEM_CGROUP_TYPE_UNSPEC = 0,
	MEM_CGROUP_TYPE_MAPPED,
	MEM_CGROUP_TYPE_CACHED,
	MEM_CGROUP_TYPE_ALL,
	MEM_CGROUP_TYPE_MAX,
};

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
};
/*
 * Always modified under the lru lock with irqs disabled, so there is no
 * need to preempt_disable() around the per-cpu update.
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
					bool charge)
{
	int val = (charge) ? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;
	VM_BUG_ON(!irqs_disabled());

	if (flags & PAGE_CGROUP_FLAG_CACHE)
		__mem_cgroup_stat_add_safe(stat,
					MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
}
static struct mem_cgroup init_mem_cgroup;

static inline
struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

static inline
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_task(p);
	css_get(&mem->css);
	mm->mem_cgroup = mem;
}

void mm_free_cgroup(struct mm_struct *mm)
{
	css_put(&mm->mem_cgroup->css);
}
static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT,
					&page->page_cgroup);
}

void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	int locked;

	/*
	 * While resetting the page_cgroup we might not hold the
	 * page_cgroup lock. free_hot_cold_page() is an example
	 * of such a scenario.
	 */
	if (pc)
		VM_BUG_ON(!page_cgroup_locked(page));
	locked = (page->page_cgroup & PAGE_CGROUP_LOCK);
	page->page_cgroup = ((unsigned long)pc | locked);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *)
		(page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void __always_inline lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
	VM_BUG_ON(!page_cgroup_locked(page));
}

static void __always_inline unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}
/*
 * Tie a new page_cgroup to the struct page under lock_page_cgroup().
 * This can fail if the page has already been tied to a page_cgroup.
 * Returns 0 on success.
 */
static int page_cgroup_assign_new_page_cgroup(struct page *page,
						struct page_cgroup *pc)
{
	int ret = 0;

	lock_page_cgroup(page);
	if (!page_get_page_cgroup(page))
		page_assign_page_cgroup(page, pc);
	else	/* A page is tied to another pc. */
		ret = 1;
	unlock_page_cgroup(page);
	return ret;
}
/*
 * Clear page->page_cgroup member under lock_page_cgroup().
 * If the given "pc" value is different from the current page->page_cgroup,
 * page->page_cgroup is not cleared.
 * Returns the value of page->page_cgroup at the time the lock was taken.
 * A caller can detect a failed clear by checking
 * clear_page_cgroup(page, pc) == pc.
 */
static struct page_cgroup *clear_page_cgroup(struct page *page,
						struct page_cgroup *pc)
{
	struct page_cgroup *ret;
	/* lock and clear */
	lock_page_cgroup(page);
	ret = page_get_page_cgroup(page);
	if (likely(ret == pc))
		page_assign_page_cgroup(page, NULL);
	unlock_page_cgroup(page);
	return ret;
}
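
/*
 * Move a page_cgroup between this cgroup's active and inactive lists.
 * Callers must hold mem->lru_lock (see mem_cgroup_move_lists() and
 * mem_cgroup_isolate_pages()).
 */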
static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	if (active) {
		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &pc->mem_cgroup->active_list);
	} else {
		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &pc->mem_cgroup->inactive_list);
	}
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_cgroup(task->mm) == mem;
	task_unlock(task);
	return ret;
}
/*
 * This routine assumes that the appropriate zone's lru lock is already held.
 */
void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	struct mem_cgroup *mem;
	if (!pc)
		return;

	mem = pc->mem_cgroup;

	spin_lock(&mem->lru_lock);
	__mem_cgroup_move_lists(pc, active);
	spin_unlock(&mem->lru_lock);
}
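
/*
 * Cgroup-aware counterpart of isolate_lru_pages(): scan up to nr_to_scan
 * entries from this cgroup's active or inactive list, skip pages that do
 * not belong to zone *z, and move successfully isolated pages onto *dst
 * for the reclaim path to process. *scanned reports how many entries
 * were examined.
 */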
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;

	if (active)
		src = &mem_cont->active_list;
	else
		src = &mem_cont->inactive_list;

	spin_lock(&mem_cont->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		page = pc->page;
		VM_BUG_ON(!pc);

		if (unlikely(!PageLRU(page)))
			continue;

		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			continue;
		}

		/*
		 * Reclaim, per zone
		 * TODO: make the active/inactive lists per zone
		 */
		if (page_zone(page) != z)
			continue;

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mem_cont->lru_lock);

	*scanned = scan;
	return nr_taken;
}
/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

	/*
	 * Should page_cgroups go to their own slab?
	 * One could optimize the performance of the charging routine
	 * by saving a bit in the page_flags and using it as a lock
	 * to see if the page already has a page_cgroup associated
	 * with it.
	 */
retry:
	if (page) {
		lock_page_cgroup(page);
		pc = page_get_page_cgroup(page);
		/*
		 * The page_cgroup exists and
		 * the page has already been accounted.
		 */
		if (pc) {
			if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
				/* is this page being uncharged? */
				unlock_page_cgroup(page);
				cpu_relax();
				goto retry;
			} else {
				unlock_page_cgroup(page);
				goto done;
			}
		}
		unlock_page_cgroup(page);
	}

	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
	if (pc == NULL)
		goto err;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (!mm)
		mm = &init_mm;

	rcu_read_lock();
	mem = rcu_dereference(mm->mem_cgroup);
	/*
	 * For every charge from the cgroup, increment the reference
	 * count.
	 */
	css_get(&mem->css);
	rcu_read_unlock();

	/*
	 * If we created the page_cgroup, we should free it on exceeding
	 * the cgroup limit.
	 */
	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up.
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
		congestion_wait(WRITE, HZ/10);
	}

	atomic_set(&pc->ref_cnt, 1);
	pc->mem_cgroup = mem;
	pc->page = page;
	pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
		pc->flags |= PAGE_CGROUP_FLAG_CACHE;

	if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) {
		/*
		 * Another charge has been added to this page already.
		 * We take lock_page_cgroup(page) again, read page->page_cgroup
		 * and increment its refcnt... just retrying is OK.
		 */
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kfree(pc);
		if (!page)
			goto done;
		goto retry;
	}

	spin_lock_irqsave(&mem->lru_lock, flags);
	/* Update statistics vector */
	mem_cgroup_charge_statistics(mem, pc->flags, true);
	list_add(&pc->lru, &mem->active_list);
	spin_unlock_irqrestore(&mem->lru_lock, flags);

done:
	return 0;
out:
	css_put(&mem->css);
	kfree(pc);
err:
	return -ENOMEM;
}
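
/*
 * Charge a page that is being mapped into an address space (accounted as
 * RSS). Page cache pages go through mem_cgroup_cache_charge() below.
 */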
int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
			gfp_t gfp_mask)
{
	return mem_cgroup_charge_common(page, mm, gfp_mask,
			MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

/*
 * See if the cached pages should be charged at all?
 */
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	int ret = 0;
	struct mem_cgroup *mem;
	if (!mm)
		mm = &init_mm;

	rcu_read_lock();
	mem = rcu_dereference(mm->mem_cgroup);
	css_get(&mem->css);
	rcu_read_unlock();
	if (mem->control_type == MEM_CGROUP_TYPE_ALL)
		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE);
	css_put(&mem->css);
	return ret;
}
/*
 * Uncharging is always a welcome operation, we never complain, simply
 * uncharge.
 */
void mem_cgroup_uncharge(struct page_cgroup *pc)
{
	struct mem_cgroup *mem;
	struct page *page;
	unsigned long flags;

	/*
	 * This can handle cases when a page is not charged at all and we
	 * are switching between handling the control_type.
	 */
	if (!pc)
		return;

	if (atomic_dec_and_test(&pc->ref_cnt)) {
		page = pc->page;
		/*
		 * get page->cgroup and clear it under lock.
		 * force_empty can drop page->cgroup without checking refcnt.
		 */
		if (clear_page_cgroup(page, pc) == pc) {
			mem = pc->mem_cgroup;
			css_put(&mem->css);
			res_counter_uncharge(&mem->res, PAGE_SIZE);
			spin_lock_irqsave(&mem->lru_lock, flags);
			list_del_init(&pc->lru);
			mem_cgroup_charge_statistics(mem, pc->flags, false);
			spin_unlock_irqrestore(&mem->lru_lock, flags);
			kfree(pc);
		}
	}
}
/*
 * Returns non-zero if a page (under migration) has a valid page_cgroup member.
 * The refcnt of the page_cgroup is incremented.
 */
int mem_cgroup_prepare_migration(struct page *page)
{
	struct page_cgroup *pc;
	int ret = 0;
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (pc && atomic_inc_not_zero(&pc->ref_cnt))
		ret = 1;
	unlock_page_cgroup(page);
	return ret;
}

void mem_cgroup_end_migration(struct page *page)
{
	struct page_cgroup *pc = page_get_page_cgroup(page);
	mem_cgroup_uncharge(pc);
}
/*
 * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
 * And there is no race with uncharge() routines because the page_cgroup
 * for *page* holds one extra reference taken by mem_cgroup_prepare_migration().
 */
void mem_cgroup_page_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
retry:
	pc = page_get_page_cgroup(page);
	if (!pc)
		return;
	if (clear_page_cgroup(page, pc) != pc)
		goto retry;
	pc->page = newpage;
	lock_page_cgroup(newpage);
	page_assign_page_cgroup(newpage, pc);
	unlock_page_cgroup(newpage);
	return;
}
/*
 * This routine traverses the page_cgroups on the given list and drops them all.
 * It ignores page_cgroup->ref_cnt.
 * *And* it doesn't reclaim the pages themselves, it just removes the page_cgroups.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void
mem_cgroup_force_empty_list(struct mem_cgroup *mem, struct list_head *list)
{
	struct page_cgroup *pc;
	struct page *page;
	int count;
	unsigned long flags;

retry:
	count = FORCE_UNCHARGE_BATCH;
	spin_lock_irqsave(&mem->lru_lock, flags);

	while (--count && !list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		/* Avoid race with charge */
		atomic_set(&pc->ref_cnt, 0);
		if (clear_page_cgroup(page, pc) == pc) {
			css_put(&mem->css);
			res_counter_uncharge(&mem->res, PAGE_SIZE);
			list_del_init(&pc->lru);
			mem_cgroup_charge_statistics(mem, pc->flags, false);
			kfree(pc);
		} else	/* being uncharged? ...do relax */
			break;
	}
	spin_unlock_irqrestore(&mem->lru_lock, flags);
	if (!list_empty(list)) {
		cond_resched();
		goto retry;
	}
	return;
}
/*
 * Make the mem_cgroup's charge 0 if no task is using it.
 * This enables deleting this mem_cgroup.
 */
int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	css_get(&mem->css);
	/*
	 * The page reclaim code (kswapd etc..) will move pages between
	 * active_list <-> inactive_list while we don't take a lock,
	 * so we have to loop here until both lists are empty.
	 */
	while (!(list_empty(&mem->active_list) &&
		 list_empty(&mem->inactive_list))) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		/* drop all page_cgroup in active_list */
		mem_cgroup_force_empty_list(mem, &mem->active_list);
		/* drop all page_cgroup in inactive_list */
		mem_cgroup_force_empty_list(mem, &mem->inactive_list);
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}
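
/*
 * Parse and round a limit written to limit_in_bytes. memparse() accepts an
 * optional K/M/G suffix; the result is rounded up to a whole number of
 * pages. For example (illustrative, assuming 4KiB pages), writing "4097"
 * stores 8192, while writing "1M" stores 1048576 unchanged.
 */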
int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
{
	*tmp = memparse(buf, &buf);
	if (*buf != '\0')
		return -EINVAL;

	/*
	 * Round up the value to the closest page size
	 */
	*tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
	return 0;
}
static ssize_t mem_cgroup_read(struct cgroup *cont,
			struct cftype *cft, struct file *file,
			char __user *userbuf, size_t nbytes, loff_t *ppos)
{
	return res_counter_read(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				NULL);
}

static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
				struct file *file, const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				mem_cgroup_write_strategy);
}
static ssize_t mem_control_type_write(struct cgroup *cont,
			struct cftype *cft, struct file *file,
			const char __user *userbuf,
			size_t nbytes, loff_t *pos)
{
	int ret;
	char *buf, *end;
	unsigned long tmp;
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	buf = kmalloc(nbytes + 1, GFP_KERNEL);
	ret = -ENOMEM;
	if (buf == NULL)
		goto out;

	buf[nbytes] = 0;
	ret = -EFAULT;
	if (copy_from_user(buf, userbuf, nbytes))
		goto out_free;

	ret = -EINVAL;
	tmp = simple_strtoul(buf, &end, 10);
	if (*end != '\0')
		goto out_free;

	if (tmp <= MEM_CGROUP_TYPE_UNSPEC || tmp >= MEM_CGROUP_TYPE_MAX)
		goto out_free;

	mem->control_type = tmp;
	ret = nbytes;
out_free:
	kfree(buf);
out:
	return ret;
}
static ssize_t mem_control_type_read(struct cgroup *cont,
				struct cftype *cft,
				struct file *file, char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	unsigned long val;
	char buf[64], *s;
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	s = buf;
	val = mem->control_type;
	s += sprintf(s, "%lu\n", val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			ppos, buf, s - buf);
}
static ssize_t mem_force_empty_write(struct cgroup *cont,
				struct cftype *cft, struct file *file,
				const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	int ret;
	ret = mem_cgroup_force_empty(mem);
	if (!ret)
		ret = nbytes;
	return ret;
}

/*
 * Note: This should be removed if cgroup supports write-only files.
 */
static ssize_t mem_force_empty_read(struct cgroup *cont,
				struct cftype *cft,
				struct file *file, char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return -EINVAL;
}
static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
};
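
/*
 * Emit one "<name> <bytes>" line per statistics counter ("cache" and
 * "rss"). The per-page counts are converted to bytes using the unit
 * field of mem_cgroup_stat_desc above.
 */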
static int mem_control_stat_show(struct seq_file *m, void *arg)
{
	struct cgroup *cont = m->private;
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		seq_printf(m, "%s %lld\n", mem_cgroup_stat_desc[i].msg,
				(long long)val);
	}
	return 0;
}

static const struct file_operations mem_control_stat_file_operations = {
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int mem_control_stat_open(struct inode *unused, struct file *file)
{
	/* XXX __d_cont */
	struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;

	file->f_op = &mem_control_stat_file_operations;
	return single_open(file, mem_control_stat_show, cont);
}
static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.read = mem_cgroup_read,
	},
	{
		.name = "control_type",
		.write = mem_control_type_write,
		.read = mem_control_type_read,
	},
	{
		.name = "force_empty",
		.write = mem_force_empty_write,
		.read = mem_force_empty_read,
	},
	{
		.name = "stat",
		.open = mem_control_stat_open,
	},
};
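
/*
 * Rough usage sketch (illustrative only, not taken from this tree): with a
 * cgroup hierarchy mounted with the "memory" subsystem, the files above are
 * typically exposed prefixed with the subsystem name, e.g.:
 *
 *	# mount -t cgroup -o memory none /cgroups
 *	# mkdir /cgroups/grp0
 *	# echo 64M > /cgroups/grp0/memory.limit_in_bytes
 *	# cat /cgroups/grp0/memory.usage_in_bytes
 *	# echo 1 > /cgroups/grp0/memory.force_empty
 *
 * The exact mount options and file name prefix are determined by the cgroup
 * core, not by this file.
 */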
static struct mem_cgroup init_mem_cgroup;

static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		init_mm.mem_cgroup = mem;
	} else
		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);

	if (mem == NULL)
		return NULL;

	res_counter_init(&mem->res);
	INIT_LIST_HEAD(&mem->active_list);
	INIT_LIST_HEAD(&mem->inactive_list);
	spin_lock_init(&mem->lru_lock);
	mem->control_type = MEM_CGROUP_TYPE_ALL;
	return &mem->css;
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem_cgroup_force_empty(mem);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	kfree(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	if (mem == old_mem)
		goto out;

	/*
	 * Only thread group leaders are allowed to migrate, the mm_struct is
	 * in effect owned by the leader.
	 */
	if (p->tgid != p->pid)
		goto out;

	css_get(&mem->css);
	rcu_assign_pointer(mm->mem_cgroup, mem);
	css_put(&old_mem->css);

out:
	mmput(mm);
	return;
}
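
/*
 * early_init is set so the root memory cgroup is created during early boot;
 * mem_cgroup_create() then points init_mm.mem_cgroup at init_mem_cgroup, so
 * charges made with no mm (which fall back to init_mm in
 * mem_cgroup_charge_common()) always find a valid group.
 */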
struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 1,
};