// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/memcontrol.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/pagewalk.h>
#include <linux/backing-dev.h>
#include <linux/swap_cgroup.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/file.h>
#include <linux/seq_buf.h>

#include "memcontrol-v1.h"
/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */
struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/*
 * Maximum loops in mem_cgroup_soft_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
/* Stuff for move charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1ULL
#define MOVE_FILE	0x2ULL
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removing. This callback must be set,
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below needed to unregister event when
	 * userspace closes eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};
#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
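
/*
 * Illustrative example: a cftype ->private of MEMFILE_PRIVATE(_MEM, RES_LIMIT)
 * packs the counter type into the upper 16 bits and the attribute into the
 * lower 16 bits; mem_cgroup_read_u64() and mem_cgroup_write() below use
 * MEMFILE_TYPE() and MEMFILE_ATTR() to pick the page_counter and operation.
 */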
#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

DEFINE_SPINLOCK(memcg_oom_lock);
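
/*
 * Soft limit tree helpers: each node's tree orders memcgs by usage_in_excess
 * (pages above the soft limit), and rb_rightmost caches the biggest offender
 * so soft limit reclaim can pick its next victim without walking the tree.
 */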
static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else {
			p = &(*p)->rb_right;
		}
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}
static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}
static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}
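
/*
 * Illustrative example: with usage at 1536 pages and a soft limit of 1024
 * pages, soft_limit_excess() returns 512; a memcg at or below its soft limit
 * reports 0 and is kept off (or removed from) the soft limit tree.
 */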
static void memcg1_update_tree(struct mem_cgroup *memcg, int nid)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	if (lru_gen_enabled()) {
		if (soft_limit_excess(memcg))
			lru_gen_soft_reclaim(memcg, nid);
		return;
	}

	mctz = soft_limit_tree.rb_tree_per_node[nid];
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = memcg->nodeinfo[nid];
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}
void memcg1_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = memcg->nodeinfo[nid];
		mctz = soft_limit_tree.rb_tree_per_node[nid];
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}
static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget(&mz->memcg->css))
		goto retry;
done:
	return mz;
}
static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}
static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too large, so we don't
				 * reclaim too much, nor too small, so we don't
				 * keep coming back to reclaim from this cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}
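
/*
 * memcg1_soft_limit_reclaim() below is the entry point used by reclaim: it
 * repeatedly takes the largest offender from this node's tree, reclaims from
 * that hierarchy via mem_cgroup_soft_reclaim(), and re-inserts the memcg with
 * its recomputed excess.
 */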
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
					gfp_t gfp_mask,
					unsigned long *total_scanned)
{
	unsigned long nr_reclaimed = 0;
	struct mem_cgroup_per_node *mz, *next_mz = NULL;
	unsigned long reclaimed;
	int loop = 0;
	struct mem_cgroup_tree_per_node *mctz;
	unsigned long excess;

	if (lru_gen_enabled())
		return 0;

	if (order > 0)
		return 0;

	mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];

	/*
	 * Do not even bother to check the largest node if the root
	 * is empty. Do it lockless to prevent lock bouncing. Races
	 * are acceptable as soft limit is best effort anyway.
	 */
	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
		return 0;

	/*
	 * This loop can run a while, especially if mem_cgroups continuously
	 * keep exceeding their soft limit and putting the system under
	 * pressure.
	 */
	do {
		if (next_mz)
			mz = next_mz;
		else
			mz = mem_cgroup_largest_soft_limit_node(mctz);
		if (!mz)
			break;

		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
						    gfp_mask, total_scanned);
		nr_reclaimed += reclaimed;
		spin_lock_irq(&mctz->lock);

		/*
		 * If we failed to reclaim anything from this memory cgroup
		 * it is time to move on to the next cgroup.
		 */
		next_mz = NULL;
		if (!reclaimed)
			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);

		excess = soft_limit_excess(mz->memcg);
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0, simply because due
		 * to priority we are exposing a smaller subset of
		 * memory to reclaim from. Consider this as a longer
		 * term TODO.
		 */
		/* If excess == 0, no tree ops */
		__mem_cgroup_insert_exceeded(mz, mctz, excess);
		spin_unlock_irq(&mctz->lock);
		css_put(&mz->memcg->css);
		loop++;
		/*
		 * Could not reclaim anything and there are no more
		 * mem cgroups to try or we seem to be looping without
		 * reclaiming anything.
		 */
		if (!nr_reclaimed &&
			(next_mz == NULL ||
			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
			break;
	} while (!nr_reclaimed);
	if (next_mz)
		css_put(&next_mz->memcg->css);
	return nr_reclaimed;
}
/*
 * A routine for checking "mem" is under move_account() or not.
 *
 * Checking a cgroup is mc.from or mc.to or under hierarchy of
 * moving cgroups. This is for waiting at high-memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}
bool memcg1_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}
/**
 * folio_memcg_lock - Bind a folio to its memcg.
 * @folio: The folio.
 *
 * This function prevents unlocked LRU folios from being moved to
 * another cgroup.
 *
 * It ensures lifetime of the bound memcg. The caller is responsible
 * for the lifetime of the folio.
 */
void folio_memcg_lock(struct folio *folio)
{
	struct mem_cgroup *memcg;
	unsigned long flags;

	/*
	 * The RCU lock is held throughout the transaction. The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
	 */
	rcu_read_lock();

	if (mem_cgroup_disabled())
		return;
again:
	memcg = folio_memcg(folio);
	if (unlikely(!memcg))
		return;

#ifdef CONFIG_PROVE_LOCKING
	local_irq_save(flags);
	might_lock(&memcg->move_lock);
	local_irq_restore(flags);
#endif

	if (atomic_read(&memcg->moving_account) <= 0)
		return;

	spin_lock_irqsave(&memcg->move_lock, flags);
	if (memcg != folio_memcg(folio)) {
		spin_unlock_irqrestore(&memcg->move_lock, flags);
		goto again;
	}

	/*
	 * When charge migration first begins, we can have multiple
	 * critical sections holding the fast-path RCU lock and one
	 * holding the slowpath move_lock. Track the task who has the
	 * move_lock for folio_memcg_unlock().
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;
}
static void __folio_memcg_unlock(struct mem_cgroup *memcg)
{
	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}

	rcu_read_unlock();
}
/**
 * folio_memcg_unlock - Release the binding between a folio and its memcg.
 * @folio: The folio.
 *
 * This releases the binding created by folio_memcg_lock(). This does
 * not change the accounting of this folio to its memcg, but it does
 * permit others to change it.
 */
void folio_memcg_unlock(struct folio *folio)
{
	__folio_memcg_unlock(folio_memcg(folio));
}
#ifdef CONFIG_SWAP
/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called page_counter_charge() about
 * both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	unsigned short old_id, new_id;

	old_id = mem_cgroup_id(from);
	new_id = mem_cgroup_id(to);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mod_memcg_state(from, MEMCG_SWAP, -1);
		mod_memcg_state(to, MEMCG_SWAP, 1);
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	return 0;
}
#endif
static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
				       struct cftype *cft)
{
	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
		     "Please report your usecase to linux-mm@kvack.org if you "
		     "depend on this functionality.\n");

	if (val & ~MOVE_MASK)
		return -EINVAL;

	/*
	 * No kind of locking is needed in here, because ->can_attach() will
	 * check this value once in the beginning of the process, and then carry
	 * on with stale data. This means that changes to this value will only
	 * affect task migrations starting after the change.
	 */
	memcg->move_charge_at_immigrate = val;
	return 0;
}
#else
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif
#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret;

	/* Try a single bulk charge without reclaim first, kswapd may wake */
	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
	if (!ret) {
		mc.precharge += count;
		return ret;
	}

	/* Try charges one by one with reclaim, but do not retry */
	while (count--) {
		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
		if (ret)
			return ret;
		mc.precharge++;
		cond_resched();
	}
	return 0;
}
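
/*
 * Illustrative example: precharging 512 pages first tries one bulk
 * try_charge() without direct reclaim; only if that fails does it fall back
 * to charging one page at a time with __GFP_NORETRY, so a large migration
 * degrades gracefully instead of stalling in reclaim for the whole batch.
 */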
union mc_target {
	struct folio	*folio;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE = 0,
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
	MC_TARGET_DEVICE,
};
static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
					  unsigned long addr, pte_t ptent)
{
	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page)
		return NULL;
	if (PageAnon(page)) {
		if (!(mc.flags & MOVE_ANON))
			return NULL;
	} else {
		if (!(mc.flags & MOVE_FILE))
			return NULL;
	}
	get_page(page);

	return page;
}
#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!(mc.flags & MOVE_ANON))
		return NULL;

	/*
	 * Handle device private pages that are not accessible by the CPU, but
	 * stored as special swap entries in the page table.
	 */
	if (is_device_private_entry(ent)) {
		page = pfn_swap_entry_to_page(ent);
		if (!get_page_unless_zero(page))
			return NULL;
		return page;
	}

	if (non_swap_entry(ent))
		return NULL;

	/*
	 * Because swap_cache_get_folio() updates some statistics counter,
	 * we call find_get_page() with swapper_space directly.
	 */
	page = find_get_page(swap_address_space(ent), swap_cache_index(ent));
	entry->val = ent.val;

	return page;
}
#else
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			pte_t ptent, swp_entry_t *entry)
{
	return NULL;
}
#endif
static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent)
{
	unsigned long index;
	struct folio *folio;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!(mc.flags & MOVE_FILE))
		return NULL;

	/* folio is moved even if it's not RSS of this task(page-faulted). */
	/* shmem/tmpfs may report page out on swap: account for that too. */
	index = linear_page_index(vma, addr);
	folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
	if (IS_ERR(folio))
		return NULL;
	return folio_file_page(folio, index);
}
static void memcg1_check_events(struct mem_cgroup *memcg, int nid);
static void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
/**
 * mem_cgroup_move_account - move account of the folio
 * @folio: The folio.
 * @compound: charge the page as compound or small page
 * @from: mem_cgroup which the folio is moved from.
 * @to:	mem_cgroup which the folio is moved to. @from != @to.
 *
 * The folio must be locked and not on the LRU.
 *
 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
 * from old cgroup.
 */
static int mem_cgroup_move_account(struct folio *folio,
				   bool compound,
				   struct mem_cgroup *from,
				   struct mem_cgroup *to)
{
	struct lruvec *from_vec, *to_vec;
	struct pglist_data *pgdat;
	unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
	int nid, ret;

	VM_BUG_ON(from == to);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
	VM_BUG_ON(compound && !folio_test_large(folio));

	ret = -EINVAL;
	if (folio_memcg(folio) != from)
		goto out;

	pgdat = folio_pgdat(folio);
	from_vec = mem_cgroup_lruvec(from, pgdat);
	to_vec = mem_cgroup_lruvec(to, pgdat);

	folio_memcg_lock(folio);

	if (folio_test_anon(folio)) {
		if (folio_mapped(folio)) {
			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
			if (folio_test_pmd_mappable(folio)) {
				__mod_lruvec_state(from_vec, NR_ANON_THPS,
						   -nr_pages);
				__mod_lruvec_state(to_vec, NR_ANON_THPS,
						   nr_pages);
			}
		}
	} else {
		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);

		if (folio_test_swapbacked(folio)) {
			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
		}

		if (folio_mapped(folio)) {
			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
		}

		if (folio_test_dirty(folio)) {
			struct address_space *mapping = folio_mapping(folio);

			if (mapping_can_writeback(mapping)) {
				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
						   -nr_pages);
				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
						   nr_pages);
			}
		}
	}

#ifdef CONFIG_SWAP
	if (folio_test_swapcache(folio)) {
		__mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
		__mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
	}
#endif
	if (folio_test_writeback(folio)) {
		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
	}

	/*
	 * All state has been migrated, let's switch to the new memcg.
	 *
	 * It is safe to change page's memcg here because the page
	 * is referenced, charged, isolated, and locked: we can't race
	 * with (un)charging, migration, LRU putback, or anything else
	 * that would rely on a stable page's memory cgroup.
	 *
	 * Note that folio_memcg_lock is a memcg lock, not a page lock,
	 * to save space. As soon as we switch page's memory cgroup to a
	 * new memcg that isn't locked, the above state can change
	 * concurrently again. Make sure we're truly done with it.
	 */
	smp_mb();

	css_get(&to->css);
	css_put(&from->css);

	folio->memcg_data = (unsigned long)to;

	__folio_memcg_unlock(from);

	ret = 0;
	nid = folio_nid(folio);

	local_irq_disable();
	memcg1_charge_statistics(to, nr_pages);
	memcg1_check_events(to, nid);
	memcg1_charge_statistics(from, -nr_pages);
	memcg1_check_events(from, nid);
	local_irq_enable();
out:
	return ret;
}
/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer the target page or swap ent will be stored(can be NULL)
 *
 * Context: Called with pte lock held.
 * Return:
 * * MC_TARGET_NONE - If the pte is not a target for move charge.
 * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for
 *   move charge. If @target is not NULL, the folio is stored in target->folio
 *   with extra refcnt taken (Caller should release it).
 * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a
 *   target for charge migration. If @target is not NULL, the entry is
 *   stored in target->ent.
 * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but page is device memory and
 *   thus not on the lru. For now such page is charged like a regular page
 *   would be as it is just special memory taking the place of a regular page.
 *   See Documentation/vm/hmm.txt and include/linux/hmm.h
 */
static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	struct folio *folio;
	enum mc_target_type ret = MC_TARGET_NONE;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (pte_none_mostly(ptent))
		/*
		 * PTE markers should be treated as a none pte here, separated
		 * from other swap handling below.
		 */
		page = mc_handle_file_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, ptent, &ent);

	if (page)
		folio = page_folio(page);
	if (target && page) {
		if (!folio_trylock(folio)) {
			folio_put(folio);
			return ret;
		}
		/*
		 * page_mapped() must be stable during the move. This
		 * pte is locked, so if it's present, the page cannot
		 * become unmapped. If it isn't, we have only partial
		 * control over the mapped state: the page lock will
		 * prevent new faults against pagecache and swapcache,
		 * so an unmapped page cannot become mapped. However,
		 * if the page is already mapped elsewhere, it can
		 * unmap, and there is nothing we can do about it.
		 * Alas, skip moving the page in this case.
		 */
		if (!pte_present(ptent) && page_mapped(page)) {
			folio_unlock(folio);
			folio_put(folio);
			return ret;
		}
	}

	if (!page && !ent.val)
		return ret;
	if (page) {
		/*
		 * Do only loose check w/o serialization.
		 * mem_cgroup_move_account() checks the page is valid or
		 * not under LRU exclusion.
		 */
		if (folio_memcg(folio) == mc.from) {
			ret = MC_TARGET_PAGE;
			if (folio_is_device_private(folio) ||
			    folio_is_device_coherent(folio))
				ret = MC_TARGET_DEVICE;
			if (target)
				target->folio = folio;
		}
		if (!ret || !target) {
			if (target)
				folio_unlock(folio);
			folio_put(folio);
		}
	}
	/*
	 * There is a swap entry and a page doesn't exist or isn't charged.
	 * But we cannot move a tail-page in a THP.
	 */
	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider PMD mapped swapping or file mapped pages because THP does
 * not support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	struct page *page = NULL;
	struct folio *folio;
	enum mc_target_type ret = MC_TARGET_NONE;

	if (unlikely(is_swap_pmd(pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(pmd));
		return ret;
	}
	page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
	folio = page_folio(page);
	if (!(mc.flags & MOVE_ANON))
		return ret;
	if (folio_memcg(folio) == mc.from) {
		ret = MC_TARGET_PAGE;
		if (target) {
			folio_get(folio);
			if (!folio_trylock(folio)) {
				folio_put(folio);
				return MC_TARGET_NONE;
			}
			target->folio = folio;
		}
	}
	return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif
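
/*
 * The two page table walks below implement charge moving in two passes:
 * mem_cgroup_count_precharge() only counts movable pages so mc.to can be
 * precharged up front, and mem_cgroup_move_charge_pte_range() then performs
 * the actual moves, consuming mc.precharge as it goes.
 */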
static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		/*
		 * Note there cannot be MC_TARGET_DEVICE for now as we do not
		 * support transparent huge page with MEMORY_DEVICE_PRIVATE but
		 * this might change.
		 */
		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
			mc.precharge += HPAGE_PMD_NR;
		spin_unlock(ptl);
		return 0;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!pte)
		return 0;
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (get_mctgt_type(vma, addr, ptep_get(pte), NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops precharge_walk_ops = {
	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
	.walk_lock	= PGWALK_RDLOCK,
};
static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;

	mmap_read_lock(mm);
	walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
	mmap_read_unlock(mm);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}
static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}
/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		mem_cgroup_cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);

		mem_cgroup_id_put_many(mc.from, mc.moved_swap);

		/*
		 * we charged both to->memory and to->memsw, so we
		 * should uncharge to->memory.
		 */
		if (!mem_cgroup_is_root(mc.to))
			page_counter_uncharge(&mc.to->memory, mc.moved_swap);

		mc.moved_swap = 0;
	}
	memcg1_oom_recover(from);
	memcg1_oom_recover(to);
	wake_up_all(&mc.waitq);
}
static void mem_cgroup_clear_mc(void)
{
	struct mm_struct *mm = mc.mm;

	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	mc.mm = NULL;
	spin_unlock(&mc.lock);

	mmput(mm);
}
int memcg1_can_attach(struct cgroup_taskset *tset)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
	struct mem_cgroup *from;
	struct task_struct *leader, *p;
	struct mm_struct *mm;
	unsigned long move_flags;
	int ret = 0;

	/* charge immigration isn't supported on the default hierarchy */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return 0;

	/*
	 * Multi-process migrations only happen on the default hierarchy
	 * where charge immigration is not used. Perform charge
	 * immigration if @tset contains a leader and whine if there are
	 * multiple.
	 */
	p = NULL;
	cgroup_taskset_for_each_leader(leader, css, tset) {
		WARN_ON_ONCE(p);
		p = leader;
		memcg = mem_cgroup_from_css(css);
	}
	if (!p)
		return 0;

	/*
	 * We are now committed to this value whatever it is. Changes in this
	 * tunable will only affect upcoming migrations, not the current one.
	 * So we need to save it, and keep it going.
	 */
	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
	if (!move_flags)
		return 0;

	from = mem_cgroup_from_task(p);

	VM_BUG_ON(from == memcg);

	mm = get_task_mm(p);
	if (!mm)
		return 0;
	/* We move charges only when we move an owner of the mm */
	if (mm->owner == p) {
		VM_BUG_ON(mc.from);
		VM_BUG_ON(mc.to);
		VM_BUG_ON(mc.precharge);
		VM_BUG_ON(mc.moved_charge);
		VM_BUG_ON(mc.moved_swap);

		spin_lock(&mc.lock);
		mc.mm = mm;
		mc.from = from;
		mc.to = memcg;
		mc.flags = move_flags;
		spin_unlock(&mc.lock);
		/* We set mc.moving_task later */

		ret = mem_cgroup_precharge_mc(mm);
		if (ret)
			mem_cgroup_clear_mc();
	} else {
		mmput(mm);
	}
	return ret;
}
void memcg1_cancel_attach(struct cgroup_taskset *tset)
{
	if (mc.to)
		mem_cgroup_clear_mc();
}
static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	int ret = 0;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;
	enum mc_target_type target_type;
	union mc_target target;
	struct folio *folio;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (mc.precharge < HPAGE_PMD_NR) {
			spin_unlock(ptl);
			return 0;
		}
		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
		if (target_type == MC_TARGET_PAGE) {
			folio = target.folio;
			if (folio_isolate_lru(folio)) {
				if (!mem_cgroup_move_account(folio, true,
							     mc.from, mc.to)) {
					mc.precharge -= HPAGE_PMD_NR;
					mc.moved_charge += HPAGE_PMD_NR;
				}
				folio_putback_lru(folio);
			}
			folio_unlock(folio);
			folio_put(folio);
		} else if (target_type == MC_TARGET_DEVICE) {
			folio = target.folio;
			if (!mem_cgroup_move_account(folio, true,
						     mc.from, mc.to)) {
				mc.precharge -= HPAGE_PMD_NR;
				mc.moved_charge += HPAGE_PMD_NR;
			}
			folio_unlock(folio);
			folio_put(folio);
		}
		spin_unlock(ptl);
		return 0;
	}

retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!pte)
		return 0;
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = ptep_get(pte++);
		bool device = false;
		swp_entry_t ent;

		if (!mc.precharge)
			break;

		switch (get_mctgt_type(vma, addr, ptent, &target)) {
		case MC_TARGET_DEVICE:
			device = true;
			fallthrough;
		case MC_TARGET_PAGE:
			folio = target.folio;
			/*
			 * We can have a part of the split pmd here. Moving it
			 * can be done but it would be too convoluted so simply
			 * ignore such a partial THP and keep it in original
			 * memcg. There should be somebody mapping the head.
			 */
			if (folio_test_large(folio))
				goto put;
			if (!device && !folio_isolate_lru(folio))
				goto put;
			if (!mem_cgroup_move_account(folio, false,
						mc.from, mc.to)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
			}
			if (!device)
				folio_putback_lru(folio);
put:			/* get_mctgt_type() gets & locks the page */
			folio_unlock(folio);
			folio_put(folio);
			break;
		case MC_TARGET_SWAP:
			ent = target.ent;
			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
				mc.precharge--;
				mem_cgroup_id_get_many(mc.to, 1);
				/* we fixup other refcnts and charges later. */
				mc.moved_swap++;
			}
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * We try charge one by one, but don't do any additional
		 * charges to mc.to if we have failed in charge once in attach()
		 * phase.
		 */
		ret = mem_cgroup_do_precharge(1);
		if (!ret)
			goto retry;
	}

	return ret;
}
static const struct mm_walk_ops charge_walk_ops = {
	.pmd_entry	= mem_cgroup_move_charge_pte_range,
	.walk_lock	= PGWALK_RDLOCK,
};
static void mem_cgroup_move_charge(void)
{
	lru_add_drain_all();
	/*
	 * Signal folio_memcg_lock() to take the memcg's move_lock
	 * while we're moving its pages to another memcg. Then wait
	 * for already started RCU-only updates to finish.
	 */
	atomic_inc(&mc.from->moving_account);
	synchronize_rcu();
retry:
	if (unlikely(!mmap_read_trylock(mc.mm))) {
		/*
		 * Someone who is holding the mmap_lock might be waiting in
		 * waitq. So we cancel all extra charges, wake up all waiters,
		 * and retry. Because we cancel precharges, we might not be able
		 * to move enough charges, but moving charge is a best-effort
		 * feature anyway, so it wouldn't be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	/*
	 * When we have consumed all precharges and failed in doing
	 * additional charge, the page walk just aborts.
	 */
	walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
	mmap_read_unlock(mc.mm);
	atomic_dec(&mc.from->moving_account);
}
void memcg1_move_task(void)
{
	if (mc.to) {
		mem_cgroup_move_charge();
		mem_cgroup_clear_mc();
	}
}

#else	/* !CONFIG_MMU */
int memcg1_can_attach(struct cgroup_taskset *tset)
{
	return 0;
}
void memcg1_cancel_attach(struct cgroup_taskset *tset)
{
}
void memcg1_move_task(void)
{
}
#endif
static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	unsigned long usage;
	int i;

	rcu_read_lock();
	if (!swap)
		t = rcu_dereference(memcg->thresholds.primary);
	else
		t = rcu_dereference(memcg->memsw_thresholds.primary);

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to threshold just below or equal to usage.
	 * If it's not true, a threshold was crossed after last
	 * call of __mem_cgroup_threshold().
	 */
	i = t->current_threshold;

	/*
	 * Iterate backward over array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd);

	/* Update current_threshold */
	t->current_threshold = i - 1;
unlock:
	rcu_read_unlock();
}

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
	while (memcg) {
		__mem_cgroup_threshold(memcg, false);
		if (do_memsw_account())
			__mem_cgroup_threshold(memcg, true);

		memcg = parent_mem_cgroup(memcg);
	}
}
/* Cgroup1: threshold notifications & softlimit tree updates */
struct memcg1_events_percpu {
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

static void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages)
{
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->events_percpu->nr_page_events, nr_pages);
}
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024

static bool memcg1_event_ratelimit(struct mem_cgroup *memcg,
				enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->events_percpu->nr_page_events);
	next = __this_cpu_read(memcg->events_percpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->events_percpu->targets[target], next);
		return true;
	}
	return false;
}
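
/*
 * Illustrative example: nr_page_events advances by one per page charged or
 * uncharged. Once it passes the per-cpu target, memcg1_event_ratelimit()
 * returns true, so thresholds are re-evaluated roughly every 128 page events
 * and the soft limit tree is updated roughly every 1024 page events.
 */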
/*
 * Check events in order.
 */
static void memcg1_check_events(struct mem_cgroup *memcg, int nid)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		return;

	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(memcg1_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;

		do_softlimit = memcg1_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			memcg1_update_tree(memcg, nid);
	}
}
void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
{
	unsigned long flags;

	local_irq_save(flags);
	memcg1_charge_statistics(memcg, folio_nr_pages(folio));
	memcg1_check_events(memcg, folio_nid(folio));
	local_irq_restore(flags);
}
void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg)
{
	/*
	 * Interrupts should be disabled here because the caller holds the
	 * i_pages lock which is taken with interrupts-off. It is
	 * important here to have the interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
	 */
	preempt_disable_nested();
	VM_WARN_ON_IRQS_ENABLED();
	memcg1_charge_statistics(memcg, -folio_nr_pages(folio));
	preempt_enable_nested();
	memcg1_check_events(memcg, folio_nid(folio));
}
void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
			   unsigned long nr_memory, int nid)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, PGPGOUT, pgpgout);
	__this_cpu_add(memcg->events_percpu->nr_page_events, nr_memory);
	memcg1_check_events(memcg, nid);
	local_irq_restore(flags);
}
static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

	if (_a->threshold > _b->threshold)
		return 1;

	if (_a->threshold < _b->threshold)
		return -1;

	return 0;
}
static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
	struct mem_cgroup_eventfd_list *ev;

	spin_lock(&memcg_oom_lock);

	list_for_each_entry(ev, &memcg->oom_notify, list)
		eventfd_signal(ev->eventfd);

	spin_unlock(&memcg_oom_lock);
	return 0;
}
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		mem_cgroup_oom_notify_cb(iter);
}
static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
{
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	unsigned long threshold;
	unsigned long usage;
	int i, size, ret;

	ret = page_counter_memparse(args, "-1", &threshold);
	if (ret)
		return ret;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM) {
		thresholds = &memcg->thresholds;
		usage = mem_cgroup_usage(memcg, false);
	} else if (type == _MEMSWAP) {
		thresholds = &memcg->memsw_thresholds;
		usage = mem_cgroup_usage(memcg, true);
	} else
		BUG();

	/* Check if a threshold crossed before adding a new one */
	if (thresholds->primary)
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	size = thresholds->primary ? thresholds->primary->size + 1 : 1;

	/* Allocate memory for new array of thresholds */
	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}
	new->size = size;

	/* Copy thresholds (if any) to new array */
	if (thresholds->primary)
		memcpy(new->entries, thresholds->primary->entries,
		       flex_array_size(new, entries, size - 1));

	/* Add new threshold */
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;

	/* Sort thresholds. Registering of new threshold isn't time-critical */
	sort(new->entries, size, sizeof(*new->entries),
			compare_thresholds, NULL);

	/* Find current threshold */
	new->current_threshold = -1;
	for (i = 0; i < size; i++) {
		if (new->entries[i].threshold <= usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		} else
			break;
	}

	/* Free old spare buffer and save old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}
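
/*
 * Illustrative usage (cgroup v1 userspace): writing
 * "<eventfd> <fd of memory.usage_in_bytes> 104857600" to cgroup.event_control
 * reaches this path via memcg_write_event_control() and registers a 100M
 * threshold; the eventfd is signalled each time usage crosses a registered
 * threshold in either direction.
 */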
static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
}

static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
}
static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, enum res_type type)
{
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	unsigned long usage;
	int i, j, size, entries;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM) {
		thresholds = &memcg->thresholds;
		usage = mem_cgroup_usage(memcg, false);
	} else if (type == _MEMSWAP) {
		thresholds = &memcg->memsw_thresholds;
		usage = mem_cgroup_usage(memcg, true);
	} else
		BUG();

	if (!thresholds->primary)
		goto unlock;

	/* Check if a threshold crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate new number of threshold */
	size = entries = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
			size++;
		else
			entries++;
	}

	new = thresholds->spare;

	/* If no items related to eventfd have been cleared, nothing to do */
	if (!entries)
		goto unlock;

	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
		kfree(new);
		new = NULL;
		goto swap_buffers;
	}

	new->size = size;

	/* Copy thresholds and find current threshold */
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
			continue;

		new->entries[j] = thresholds->primary->entries[i];
		if (new->entries[j].threshold <= usage) {
			/*
			 * new->current_threshold will not be used
			 * until rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		}
		j++;
	}

swap_buffers:
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

	/* If all events are unregistered, free the spare array */
	if (!new) {
		kfree(thresholds->spare);
		thresholds->spare = NULL;
	}
unlock:
	mutex_unlock(&memcg->thresholds_lock);
}
static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
}

static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
}
static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup_eventfd_list *event;

	event = kmalloc(sizeof(*event),	GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	spin_lock(&memcg_oom_lock);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ? */
	if (memcg->under_oom)
		eventfd_signal(eventfd);
	spin_unlock(&memcg_oom_lock);

	return 0;
}
static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	struct mem_cgroup_eventfd_list *ev, *tmp;

	spin_lock(&memcg_oom_lock);

	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

	spin_unlock(&memcg_oom_lock);
}
/*
 * DO NOT USE IN NEW FILES.
 *
 * "cgroup.event_control" implementation.
 *
 * This is way over-engineered. It tries to support fully configurable
 * events for each user. Such level of flexibility is completely
 * unnecessary especially in the light of the planned unified hierarchy.
 *
 * Please deprecate this and replace with something simpler if at all
 * possible.
 */

/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
static void memcg_event_remove(struct work_struct *work)
{
	struct mem_cgroup_event *event =
		container_of(work, struct mem_cgroup_event, remove);
	struct mem_cgroup *memcg = event->memcg;

	remove_wait_queue(event->wqh, &event->wait);

	event->unregister_event(memcg, event->eventfd);

	/* Notify userspace the event is going away. */
	eventfd_signal(event->eventfd);

	eventfd_ctx_put(event->eventfd);
	kfree(event);
	css_put(&memcg->css);
}
/*
 * Gets called on EPOLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
			    int sync, void *key)
{
	struct mem_cgroup_event *event =
		container_of(wait, struct mem_cgroup_event, wait);
	struct mem_cgroup *memcg = event->memcg;
	__poll_t flags = key_to_poll(key);

	if (flags & EPOLLHUP) {
		/*
		 * If the event has been detached at cgroup removal, we
		 * can simply return knowing the other side will cleanup.
		 *
		 * We can't race against event freeing since the other
		 * side will require wqh->lock via remove_wait_queue(),
		 * which we hold.
		 */
		spin_lock(&memcg->event_list_lock);
		if (!list_empty(&event->list)) {
			list_del_init(&event->list);
			/*
			 * We are in atomic context, but cgroup_event_remove()
			 * may sleep, so we have to call it in workqueue.
			 */
			schedule_work(&event->remove);
		}
		spin_unlock(&memcg->event_list_lock);
	}

	return 0;
}
static void memcg_event_ptable_queue_proc(struct file *file,
		wait_queue_head_t *wqh, poll_table *pt)
{
	struct mem_cgroup_event *event =
		container_of(pt, struct mem_cgroup_event, pt);

	event->wqh = wqh;
	add_wait_queue(wqh, &event->wait);
}
/*
 * DO NOT USE IN NEW FILES.
 *
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 */
static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
					 char *buf, size_t nbytes, loff_t off)
{
	struct cgroup_subsys_state *css = of_css(of);
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_event *event;
	struct cgroup_subsys_state *cfile_css;
	unsigned int efd, cfd;
	struct fd efile;
	struct fd cfile;
	struct dentry *cdentry;
	const char *name;
	char *endp;
	int ret;

	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		return -EOPNOTSUPP;

	buf = strstrip(buf);

	efd = simple_strtoul(buf, &endp, 10);
	if (*endp != ' ')
		return -EINVAL;
	buf = endp + 1;

	cfd = simple_strtoul(buf, &endp, 10);
	if (*endp == '\0')
		buf = endp;
	else if (*endp == ' ')
		buf = endp + 1;
	else
		return -EINVAL;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	event->memcg = memcg;
	INIT_LIST_HEAD(&event->list);
	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
	INIT_WORK(&event->remove, memcg_event_remove);

	efile = fdget(efd);
	if (!fd_file(efile)) {
		ret = -EBADF;
		goto out_kfree;
	}

	event->eventfd = eventfd_ctx_fileget(fd_file(efile));
	if (IS_ERR(event->eventfd)) {
		ret = PTR_ERR(event->eventfd);
		goto out_put_efile;
	}

	cfile = fdget(cfd);
	if (!fd_file(cfile)) {
		ret = -EBADF;
		goto out_put_eventfd;
	}

	/* the process need read permission on control file */
	/* AV: shouldn't we check that it's been opened for read instead? */
	ret = file_permission(fd_file(cfile), MAY_READ);
	if (ret < 0)
		goto out_put_cfile;

	/*
	 * The control file must be a regular cgroup1 file. As a regular cgroup
	 * file can't be renamed, it's safe to access its name afterwards.
	 */
	cdentry = fd_file(cfile)->f_path.dentry;
	if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
		ret = -EINVAL;
		goto out_put_cfile;
	}

	/*
	 * Determine the event callbacks and set them in @event. This used
	 * to be done via struct cftype but cgroup core no longer knows
	 * about these events. The following is crude but the whole thing
	 * is for compatibility anyway.
	 *
	 * DO NOT ADD NEW FILES.
	 */
	name = cdentry->d_name.name;

	if (!strcmp(name, "memory.usage_in_bytes")) {
		event->register_event = mem_cgroup_usage_register_event;
		event->unregister_event = mem_cgroup_usage_unregister_event;
	} else if (!strcmp(name, "memory.oom_control")) {
		pr_warn_once("oom_control is deprecated and will be removed. "
			     "Please report your usecase to linux-mm@kvack.org"
			     " if you depend on this functionality.\n");
		event->register_event = mem_cgroup_oom_register_event;
		event->unregister_event = mem_cgroup_oom_unregister_event;
	} else if (!strcmp(name, "memory.pressure_level")) {
		pr_warn_once("pressure_level is deprecated and will be removed. "
			     "Please report your usecase to linux-mm@kvack.org "
			     "if you depend on this functionality.\n");
		event->register_event = vmpressure_register_event;
		event->unregister_event = vmpressure_unregister_event;
	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
		event->register_event = memsw_cgroup_usage_register_event;
		event->unregister_event = memsw_cgroup_usage_unregister_event;
	} else {
		ret = -EINVAL;
		goto out_put_cfile;
	}

	/*
	 * Verify @cfile should belong to @css. Also, remaining events are
	 * automatically removed on cgroup destruction but the removal is
	 * asynchronous, so take an extra ref on @css.
	 */
	cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
					       &memory_cgrp_subsys);
	ret = -EINVAL;
	if (IS_ERR(cfile_css))
		goto out_put_cfile;
	if (cfile_css != css) {
		css_put(cfile_css);
		goto out_put_cfile;
	}

	ret = event->register_event(memcg, event->eventfd, buf);
	if (ret)
		goto out_put_css;

	vfs_poll(fd_file(efile), &event->pt);

	spin_lock_irq(&memcg->event_list_lock);
	list_add(&event->list, &memcg->event_list);
	spin_unlock_irq(&memcg->event_list_lock);

	fdput(cfile);
	fdput(efile);

	return nbytes;

out_put_css:
	css_put(css);
out_put_cfile:
	fdput(cfile);
out_put_eventfd:
	eventfd_ctx_put(event->eventfd);
out_put_efile:
	fdput(efile);
out_kfree:
	kfree(event);

	return ret;
}
void memcg1_memcg_init(struct mem_cgroup *memcg)
{
	INIT_LIST_HEAD(&memcg->oom_notify);
	mutex_init(&memcg->thresholds_lock);
	spin_lock_init(&memcg->move_lock);
	INIT_LIST_HEAD(&memcg->event_list);
	spin_lock_init(&memcg->event_list_lock);
}
void memcg1_css_offline(struct mem_cgroup *memcg)
{
	struct mem_cgroup_event *event, *tmp;

	/*
	 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removing only after rmdir of cgroup
	 * directory to avoid race between userspace and kernelspace.
	 */
	spin_lock_irq(&memcg->event_list_lock);
	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
		list_del_init(&event->list);
		schedule_work(&event->remove);
	}
	spin_unlock_irq(&memcg->event_list_lock);
}
/*
 * Check OOM-Killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we set up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}
static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}
static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * Be careful about under_oom underflows because a child memcg
	 * could have been added after mem_cgroup_mark_under_oom.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}
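
/*
 * OOM waiters: a task that hits an OOM with the v1 OOM killer disabled queues
 * an oom_wait_info on memcg_oom_waitq; memcg_oom_wake_function() only wakes
 * waiters whose memcg is in the same hierarchy branch as the one recovering.
 */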
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_entry_t	wait;
};
static int memcg_oom_wake_function(wait_queue_entry_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}
void memcg1_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM. This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}
/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation. Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.entry);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	schedule();
	mem_cgroup_unmark_under_oom(memcg);
	finish_wait(&memcg_oom_waitq, &owait.wait);

	if (locked)
		mem_cgroup_oom_unlock(memcg);
cleanup:
	current->memcg_in_oom = NULL;
	css_put(&memcg->css);
	return true;
}
bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked)
{
	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * cgroup1 allows disabling the OOM killer and waiting for outside
	 * handling until the charge can succeed; remember the context and put
	 * the task to sleep at the end of the page fault when all locks are
	 * released.
	 *
	 * On the other hand, in-kernel OOM killer allows for an async victim
	 * memory reclaim (oom_reaper) and that means that we are not solely
	 * relying on the oom victim to make a forward progress and we can
	 * invoke the oom killer here.
	 *
	 * Please note that mem_cgroup_out_of_memory might fail to find a
	 * victim and then we have to bail out from the charge path.
	 */
	if (READ_ONCE(memcg->oom_kill_disable)) {
		if (current->in_user_fault) {
			css_get(&memcg->css);
			current->memcg_in_oom = memcg;
		}
		return false;
	}

	mem_cgroup_mark_under_oom(memcg);

	*locked = mem_cgroup_oom_trylock(memcg);

	if (*locked)
		mem_cgroup_oom_notify(memcg);

	mem_cgroup_unmark_under_oom(memcg);

	return true;
}

void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked)
{
	if (locked)
		mem_cgroup_oom_unlock(memcg);
}
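
/*
 * Illustrative example: shrinking memory.limit_in_bytes below current usage
 * loops in mem_cgroup_resize_max() below, first draining per-cpu stock and
 * then calling try_to_free_mem_cgroup_pages() until the new limit fits or
 * reclaim stops making progress, in which case the write fails with -EBUSY.
 */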
static DEFINE_MUTEX(memcg_max_mutex);

static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
				 unsigned long max, bool memsw)
{
	bool enlarge = false;
	bool drained = false;
	int ret;
	bool limits_invariant;
	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;

	do {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		mutex_lock(&memcg_max_mutex);
		/*
		 * Make sure that the new limit (memsw or memory limit) doesn't
		 * break our basic invariant rule memory.max <= memsw.max.
		 */
		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
					   max <= memcg->memsw.max;
		if (!limits_invariant) {
			mutex_unlock(&memcg_max_mutex);
			ret = -EINVAL;
			break;
		}
		if (max > counter->max)
			enlarge = true;
		ret = page_counter_set_max(counter, max);
		mutex_unlock(&memcg_max_mutex);

		if (!ret)
			break;

		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
					memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP, NULL)) {
			ret = -EBUSY;
			break;
		}
	} while (true);

	if (!ret && enlarge)
		memcg1_oom_recover(memcg);

	return ret;
}
/*
 * Reclaims as many pages from the given memcg as possible.
 *
 * Caller is responsible for holding css reference for memcg.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
{
	int nr_retries = MAX_RECLAIM_RETRIES;

	/* we call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();

	drain_all_stock(memcg);

	/* try to free all pages in this cgroup */
	while (nr_retries && page_counter_read(&memcg->memory)) {
		if (signal_pending(current))
			return -EINTR;

		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
						  MEMCG_RECLAIM_MAY_SWAP, NULL))
			nr_retries--;
	}

	return 0;
}
static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));

	if (mem_cgroup_is_root(memcg))
		return -EINVAL;
	return mem_cgroup_force_empty(memcg) ?: nbytes;
}
static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
				     struct cftype *cft)
{
	return 1;
}

static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, u64 val)
{
	if (val == 1)
		return 0;

	pr_warn_once("Non-hierarchical mode is deprecated. "
		     "Please report your usecase to linux-mm@kvack.org if you "
		     "depend on this functionality.\n");

	return -EINVAL;
}
static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct page_counter *counter;

	switch (MEMFILE_TYPE(cft->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	case _TCP:
		counter = &memcg->tcpmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		if (counter == &memcg->memory)
			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
		if (counter == &memcg->memsw)
			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_SOFT_LIMIT:
		return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE;
	default:
		BUG();
	}
}
/*
 * This function doesn't do anything useful. Its only job is to provide a read
 * handler for a file so that cgroup_file_mode() will add read permissions.
 */
static int mem_cgroup_dummy_seq_show(__always_unused struct seq_file *m,
				     __always_unused void *v)
{
	return -EINVAL;
}
static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
{
	int ret;

	mutex_lock(&memcg_max_mutex);

	ret = page_counter_set_max(&memcg->tcpmem, max);
	if (ret)
		goto out;

	if (!memcg->tcpmem_active) {
		/*
		 * The active flag needs to be written after the static_key
		 * update. This is what guarantees that the socket activation
		 * function is the last one to run. See mem_cgroup_sk_alloc()
		 * for details, and note that we don't mark any socket as
		 * belonging to this memcg until that flag is up.
		 *
		 * We need to do this, because static_keys will span multiple
		 * sites, but we can't control their order. If we mark a socket
		 * as accounted, but the accounting functions are not patched in
		 * yet, we'll lose accounting.
		 *
		 * We never race with the readers in mem_cgroup_sk_alloc(),
		 * because when this value changes, the code to process it is
		 * not patched in yet.
		 */
		static_branch_inc(&memcg_sockets_enabled_key);
		memcg->tcpmem_active = true;
	}
out:
	mutex_unlock(&memcg_max_mutex);
	return ret;
}
/*
 * The user of this function is...
 * RES_LIMIT.
 */
static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long nr_pages;
	int ret;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, "-1", &nr_pages);
	if (ret)
		return ret;

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		switch (MEMFILE_TYPE(of_cft(of)->private)) {
		case _MEM:
			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
			break;
		case _MEMSWAP:
			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
			break;
		case _KMEM:
			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
				     "Writing any value to this file has no effect. "
				     "Please report your usecase to linux-mm@kvack.org if you "
				     "depend on this functionality.\n");
			ret = 0;
			break;
		case _TCP:
			pr_warn_once("kmem.tcp.limit_in_bytes is deprecated and will be removed. "
				     "Please report your usecase to linux-mm@kvack.org if you "
				     "depend on this functionality.\n");
			ret = memcg_update_tcp_max(memcg, nr_pages);
			break;
		}
		break;
	case RES_SOFT_LIMIT:
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			ret = -EOPNOTSUPP;
		} else {
			pr_warn_once("soft_limit_in_bytes is deprecated and will be removed. "
				     "Please report your usecase to linux-mm@kvack.org if you "
				     "depend on this functionality.\n");
			WRITE_ONCE(memcg->soft_limit, nr_pages);
			ret = 0;
		}
		break;
	}
	return ret ?: nbytes;
}
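/*
 * Illustrative writes handled above (file paths are relative to the memcg
 * directory, values hypothetical):
 *
 *	echo 1G > memory.limit_in_bytes          # resize the memory counter
 *	echo -1 > memory.memsw.limit_in_bytes    # "-1" means unlimited
 *	echo 256M > memory.soft_limit_in_bytes   # deprecated, still accepted
 *
 * page_counter_memparse() accepts plain byte counts or K/M/G suffixes and
 * maps the literal "-1" to PAGE_COUNTER_MAX.
 */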
static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	struct page_counter *counter;

	switch (MEMFILE_TYPE(of_cft(of)->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	case _TCP:
		counter = &memcg->tcpmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		BUG();
	}

	return nbytes;
}
#ifdef CONFIG_NUMA

#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
				int nid, unsigned int lru_mask, bool tree)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
	unsigned long nr = 0;
	enum lru_list lru;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		if (tree)
			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
		else
			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
	}
	return nr;
}
static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask,
					     bool tree)
{
	unsigned long nr = 0;
	enum lru_list lru;

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		if (tree)
			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
		else
			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
	}
	return nr;
}
static int memcg_numa_stat_show(struct seq_file *m, void *v)
{
	struct numa_stat {
		const char *name;
		unsigned int lru_mask;
	};

	static const struct numa_stat stats[] = {
		{ "total", LRU_ALL },
		{ "file", LRU_ALL_FILE },
		{ "anon", LRU_ALL_ANON },
		{ "unevictable", BIT(LRU_UNEVICTABLE) },
	};
	const struct numa_stat *stat;
	int nid;
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	mem_cgroup_flush_stats(memcg);

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
		seq_printf(m, "%s=%lu", stat->name,
			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
						   false));
		for_each_node_state(nid, N_MEMORY)
			seq_printf(m, " N%d=%lu", nid,
				   mem_cgroup_node_nr_lru_pages(memcg, nid,
							stat->lru_mask, false));
		seq_putc(m, '\n');
	}

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
		seq_printf(m, "hierarchical_%s=%lu", stat->name,
			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
						   true));
		for_each_node_state(nid, N_MEMORY)
			seq_printf(m, " N%d=%lu", nid,
				   mem_cgroup_node_nr_lru_pages(memcg, nid,
							stat->lru_mask, true));
		seq_putc(m, '\n');
	}

	return 0;
}
#endif /* CONFIG_NUMA */
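/*
 * Example memory.numa_stat output from memcg_numa_stat_show() (two-node
 * machine, page counts hypothetical):
 *
 *	total=2048 N0=1024 N1=1024
 *	file=1536 N0=768 N1=768
 *	anon=512 N0=256 N1=256
 *	unevictable=0 N0=0 N1=0
 *	hierarchical_total=4096 N0=2048 N1=2048
 *	...
 */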
static const unsigned int memcg1_stats[] = {
	NR_FILE_PAGES, NR_ANON_MAPPED,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	NR_ANON_THPS,
#endif
	NR_SHMEM, NR_FILE_MAPPED, NR_FILE_DIRTY, NR_WRITEBACK,
	WORKINGSET_REFAULT_ANON,
	WORKINGSET_REFAULT_FILE,
#ifdef CONFIG_SWAP
	MEMCG_SWAP, NR_SWAPCACHE,
#endif
};

static const char *const memcg1_stat_names[] = {
	"cache", "rss",
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"rss_huge",
#endif
	"shmem", "mapped_file", "dirty", "writeback",
	"workingset_refault_anon",
	"workingset_refault_file",
#ifdef CONFIG_SWAP
	"swap", "swapcached",
#endif
};

/* Universal VM events cgroup1 shows, original sort order */
static const unsigned int memcg1_events[] = {
	PGPGIN, PGPGOUT, PGFAULT, PGMAJFAULT,
};
void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	unsigned long memory, memsw;
	struct mem_cgroup *mi;
	unsigned int i;

	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));

	mem_cgroup_flush_stats(memcg);

	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
		unsigned long nr;

		nr = memcg_page_state_local_output(memcg, memcg1_stats[i]);
		seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i], nr);
	}

	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
		seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]),
			       memcg_events_local(memcg, memcg1_events[i]));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_buf_printf(s, "%s %lu\n", lru_list_name(i),
			       memcg_page_state_local(memcg, NR_LRU_BASE + i) *
			       PAGE_SIZE);

	/* Hierarchical information */
	memory = memsw = PAGE_COUNTER_MAX;
	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
		memory = min(memory, READ_ONCE(mi->memory.max));
		memsw = min(memsw, READ_ONCE(mi->memsw.max));
	}
	seq_buf_printf(s, "hierarchical_memory_limit %llu\n",
		       (u64)memory * PAGE_SIZE);
	seq_buf_printf(s, "hierarchical_memsw_limit %llu\n",
		       (u64)memsw * PAGE_SIZE);

	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
		unsigned long nr;

		nr = memcg_page_state_output(memcg, memcg1_stats[i]);
		seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i],
			       (u64)nr);
	}

	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
		seq_buf_printf(s, "total_%s %llu\n",
			       vm_event_name(memcg1_events[i]),
			       (u64)memcg_events(memcg, memcg1_events[i]));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i),
			       (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
			       PAGE_SIZE);

#ifdef CONFIG_DEBUG_VM
	{
		pg_data_t *pgdat;
		struct mem_cgroup_per_node *mz;
		unsigned long anon_cost = 0;
		unsigned long file_cost = 0;

		for_each_online_pgdat(pgdat) {
			mz = memcg->nodeinfo[pgdat->node_id];

			anon_cost += mz->lruvec.anon_cost;
			file_cost += mz->lruvec.file_cost;
		}
		seq_buf_printf(s, "anon_cost %lu\n", anon_cost);
		seq_buf_printf(s, "file_cost %lu\n", file_cost);
	}
#endif
}
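/*
 * Example memory.stat fragment built above (values hypothetical; sizes are
 * reported in bytes, VM event counters as raw counts):
 *
 *	cache 1048576
 *	rss 524288
 *	pgfault 1203
 *	hierarchical_memory_limit 9223372036854771712
 *	total_cache 2097152
 *	anon_cost 0		(CONFIG_DEBUG_VM builds only)
 */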
static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return mem_cgroup_swappiness(memcg);
}

static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val > MAX_SWAPPINESS)
		return -EINVAL;

	if (!mem_cgroup_is_root(memcg))
		WRITE_ONCE(memcg->swappiness, val);
	else
		WRITE_ONCE(vm_swappiness, val);

	return 0;
}
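/*
 * Illustrative use: values 0..MAX_SWAPPINESS are accepted, anything larger
 * is rejected with -EINVAL by mem_cgroup_swappiness_write() above.
 *
 *	echo 10 > memory.swappiness	# per-memcg setting
 *
 * Writing the root cgroup's file updates the global vm_swappiness instead.
 */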
static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);

	seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
	seq_printf(sf, "oom_kill %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
	return 0;
}

static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	pr_warn_once("oom_control is deprecated and will be removed. "
		     "Please report your usecase to linux-mm@kvack.org if you "
		     "depend on this functionality.\n");

	/* cannot set to root cgroup and only 0 and 1 are allowed */
	if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
		return -EINVAL;

	WRITE_ONCE(memcg->oom_kill_disable, val);
	if (!val)
		memcg1_oom_recover(memcg);

	return 0;
}
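/*
 * Example memory.oom_control interaction (deprecated interface, values
 * illustrative):
 *
 *	$ cat memory.oom_control
 *	oom_kill_disable 0
 *	under_oom 0
 *	oom_kill 0
 *
 *	$ echo 1 > memory.oom_control	# defer OOM handling to userspace
 *
 * Only 0 and 1 are accepted, and the root cgroup rejects the write.
 */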
#ifdef CONFIG_SLUB_DEBUG
static int mem_cgroup_slab_show(struct seq_file *m, void *p)
{
	/*
	 * Deprecated.
	 * Please, take a look at tools/cgroup/memcg_slabinfo.py .
	 */
	return 0;
}
#endif
struct cftype mem_cgroup_legacy_files[] = {
	{
		.name = "usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "soft_limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "failcnt",
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "stat",
		.seq_show = memory_stat_show,
	},
	{
		.name = "force_empty",
		.write = mem_cgroup_force_empty_write,
	},
	{
		.name = "use_hierarchy",
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
	{
		.name = "cgroup.event_control",		/* XXX: for compat */
		.write = memcg_write_event_control,
		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
	},
	{
		.name = "swappiness",
		.read_u64 = mem_cgroup_swappiness_read,
		.write_u64 = mem_cgroup_swappiness_write,
	},
	{
		.name = "move_charge_at_immigrate",
		.read_u64 = mem_cgroup_move_charge_read,
		.write_u64 = mem_cgroup_move_charge_write,
	},
	{
		.name = "oom_control",
		.seq_show = mem_cgroup_oom_control_read,
		.write_u64 = mem_cgroup_oom_control_write,
	},
	{
		.name = "pressure_level",
		.seq_show = mem_cgroup_dummy_seq_show,
	},
#ifdef CONFIG_NUMA
	{
		.name = "numa_stat",
		.seq_show = memcg_numa_stat_show,
	},
#endif
	{
		.name = "kmem.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.failcnt",
		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
#ifdef CONFIG_SLUB_DEBUG
	{
		.name = "kmem.slabinfo",
		.seq_show = mem_cgroup_slab_show,
	},
#endif
	{
		.name = "kmem.tcp.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.failcnt",
		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};
struct cftype memsw_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};
void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		if (nr_pages > 0)
			page_counter_charge(&memcg->kmem, nr_pages);
		else
			page_counter_uncharge(&memcg->kmem, -nr_pages);
	}
}
bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			 gfp_t gfp_mask)
{
	struct page_counter *fail;

	if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
		memcg->tcpmem_pressure = 0;
		return true;
	}
	memcg->tcpmem_pressure = 1;
	if (gfp_mask & __GFP_NOFAIL) {
		page_counter_charge(&memcg->tcpmem, nr_pages);
		return true;
	}
	return false;
}
bool memcg1_alloc_events(struct mem_cgroup *memcg)
{
	memcg->events_percpu = alloc_percpu_gfp(struct memcg1_events_percpu,
						GFP_KERNEL_ACCOUNT);
	return !!memcg->events_percpu;
}

void memcg1_free_events(struct mem_cgroup *memcg)
{
	if (memcg->events_percpu)
		free_percpu(memcg->events_percpu);
}
static int __init memcg1_init(void)
{
	int node;

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);

		rtpn->rb_root = RB_ROOT;
		rtpn->rb_rightmost = NULL;
		spin_lock_init(&rtpn->lock);
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

	return 0;
}
subsys_initcall(memcg1_init);