// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * Per memcg lru locking
 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 */
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/parser.h>
#include <linux/vmpressure.h>
#include <linux/memremap.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include <linux/sched/isolation.h>
#include <linux/kmemleak.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"
#include "memcontrol-v1.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>
struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket __ro_after_init;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem __ro_after_init;

/* BPF memory accounting disabled? */
static bool cgroup_memory_nobpf __ro_after_init;

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
static inline bool task_is_dying(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}
/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
{
	return container_of(vmpr, struct mem_cgroup, vmpressure);
}
#define CURRENT_OBJCG_UPDATE_BIT 0
#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)

static DEFINE_SPINLOCK(objcg_lock);

bool mem_cgroup_kmem_disabled(void)
{
	return cgroup_memory_nokmem;
}

static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages);
static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	if (nr_pages)
		obj_cgroup_uncharge_pages(objcg, nr_pages);

	spin_lock_irqsave(&objcg_lock, flags);
	list_del(&objcg->list);
	spin_unlock_irqrestore(&objcg_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}
static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}
static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	struct obj_cgroup *objcg, *iter;

	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

	spin_lock_irq(&objcg_lock);

	/* 1) Ready to reparent active objcg. */
	list_add(&objcg->list, &memcg->objcg_list);
	/* 2) Reparent active objcg and already reparented objcgs to parent. */
	list_for_each_entry(iter, &memcg->objcg_list, list)
		WRITE_ONCE(iter->memcg, parent);
	/* 3) Move already reparented objcgs to the parent's list */
	list_splice(&memcg->objcg_list, &parent->objcg_list);

	spin_unlock_irq(&objcg_lock);

	percpu_ref_kill(&objcg->refcnt);
}
/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
EXPORT_SYMBOL(memcg_kmem_online_key);

DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
EXPORT_SYMBOL(memcg_bpf_enabled_key);
/**
 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
 * @folio: folio of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @folio is returned. The returned css remains associated with @folio
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}
/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	/* page_folio() is racy here, but the entire function is racy anyway */
	memcg = folio_memcg_check(page_folio(page));

	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}
/* Subset of node_stat_item for memcg stats */
static const unsigned int memcg_node_stat_items[] = {
	NR_INACTIVE_ANON,
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_SLAB_RECLAIMABLE_B,
	NR_SLAB_UNRECLAIMABLE_B,
	WORKINGSET_REFAULT_ANON,
	WORKINGSET_REFAULT_FILE,
	WORKINGSET_ACTIVATE_ANON,
	WORKINGSET_ACTIVATE_FILE,
	WORKINGSET_RESTORE_ANON,
	WORKINGSET_RESTORE_FILE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,
	NR_FILE_MAPPED,
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SHMEM,
	NR_SHMEM_THPS,
	NR_FILE_THPS,
	NR_ANON_THPS,
	NR_KERNEL_STACK_KB,
	NR_PAGETABLE,
	NR_SECONDARY_PAGETABLE,
#ifdef CONFIG_SWAP
	NR_SWAPCACHE,
#endif
};
static const unsigned int memcg_stat_items[] = {
	MEMCG_SWAP,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
};

#define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
#define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
			   ARRAY_SIZE(memcg_stat_items))
static int8_t mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;

static void init_memcg_stats(void)
{
	int8_t i, j = 0;

	BUILD_BUG_ON(MEMCG_NR_STAT >= S8_MAX);

	for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i)
		mem_cgroup_stats_index[memcg_node_stat_items[i]] = ++j;

	for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i)
		mem_cgroup_stats_index[memcg_stat_items[i]] = ++j;
}

static inline int memcg_stats_index(int idx)
{
	return mem_cgroup_stats_index[idx] - 1;
}
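
/*
 * A sketch of how readers use the dense index (see e.g. memcg_page_state()
 * below):
 *
 *	int i = memcg_stats_index(NR_FILE_DIRTY);
 *
 *	if (i >= 0)
 *		x = READ_ONCE(memcg->vmstats->state[i]);
 *
 * Items absent from memcg_node_stat_items[] and memcg_stat_items[] map to
 * -1, which readers treat as "not tracked per-memcg".
 */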
struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_MEMCG_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_MEMCG_NODE_STAT_ITEMS];

	/* Non-hierarchical (CPU aggregated) state */
	long state_local[NR_MEMCG_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
};
unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;
	int i;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	i = memcg_stats_index(idx);
	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
		return 0;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats->state[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

unsigned long lruvec_page_state_local(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;
	int i;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	i = memcg_stats_index(idx);
	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
		return 0;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats->state_local[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
	PGPGIN,
	PGPGOUT,
	PGSCAN_KSWAPD,
	PGSCAN_DIRECT,
	PGSCAN_KHUGEPAGED,
	PGSTEAL_KSWAPD,
	PGSTEAL_DIRECT,
	PGSTEAL_KHUGEPAGED,
	PGFAULT,
	PGMAJFAULT,
	PGREFILL,
	PGACTIVATE,
	PGDEACTIVATE,
	PGLAZYFREE,
	PGLAZYFREED,
#ifdef CONFIG_ZSWAP
	ZSWPIN,
	ZSWPOUT,
	ZSWPWB,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	THP_FAULT_ALLOC,
	THP_COLLAPSE_ALLOC,
	THP_SWPOUT,
	THP_SWPOUT_FALLBACK,
#endif
};

#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
static int8_t mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;

static void init_memcg_events(void)
{
	int8_t i;

	BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= S8_MAX);

	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
}

static inline int memcg_events_index(enum vm_event_item idx)
{
	return mem_cgroup_events_index[idx] - 1;
}
struct memcg_vmstats_percpu {
	/* Stats updates since the last flush */
	unsigned int stats_updates;

	/* Cached pointers for fast iteration in memcg_rstat_updated() */
	struct memcg_vmstats_percpu *parent;
	struct memcg_vmstats *vmstats;

	/* The above should fit a single cacheline for memcg_rstat_updated() */

	/* Local (CPU and cgroup) page state & events */
	long state[MEMCG_VMSTAT_SIZE];
	unsigned long events[NR_MEMCG_EVENTS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[MEMCG_VMSTAT_SIZE];
	unsigned long events_prev[NR_MEMCG_EVENTS];

	/* Cgroup1: threshold notifications & softlimit tree updates */
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
} ____cacheline_aligned;

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long state[MEMCG_VMSTAT_SIZE];
	unsigned long events[NR_MEMCG_EVENTS];

	/* Non-hierarchical (CPU aggregated) page state & events */
	long state_local[MEMCG_VMSTAT_SIZE];
	unsigned long events_local[NR_MEMCG_EVENTS];

	/* Pending child counts during tree propagation */
	long state_pending[MEMCG_VMSTAT_SIZE];
	unsigned long events_pending[NR_MEMCG_EVENTS];

	/* Stats updates since the last flush */
	atomic64_t stats_updates;
};
/*
 * memcg and lruvec stats flushing
 *
 * Many codepaths leading to stats update or read are performance sensitive and
 * adding stats flushing in such codepaths is not desirable. So, to optimize the
 * flushing the kernel does:
 *
 * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
 *    the rstat update tree grow unbounded.
 *
 * 2) Flush the stats synchronously on the reader side only when there are more
 *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization can
 *    let the stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus)
 *    update events, but only for 2 seconds due to (1).
 */
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
static u64 flush_last_time;

#define FLUSH_TIME (2UL*HZ)
/*
 * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
 * not rely on this as part of an acquired spinlock_t lock. These functions are
 * never used in hardirq context on PREEMPT_RT and therefore disabling
 * preemption is sufficient.
 */
static void memcg_stats_lock(void)
{
	preempt_disable_nested();
	VM_WARN_ON_IRQS_ENABLED();
}

static void __memcg_stats_lock(void)
{
	preempt_disable_nested();
}

static void memcg_stats_unlock(void)
{
	preempt_enable_nested();
}
static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
{
	return atomic64_read(&vmstats->stats_updates) >
		MEMCG_CHARGE_BATCH * num_online_cpus();
}
static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
{
	struct memcg_vmstats_percpu *statc;
	int cpu = smp_processor_id();
	unsigned int stats_updates;

	if (!val)
		return;

	cgroup_rstat_updated(memcg->css.cgroup, cpu);
	statc = this_cpu_ptr(memcg->vmstats_percpu);
	for (; statc; statc = statc->parent) {
		stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
		WRITE_ONCE(statc->stats_updates, stats_updates);
		if (stats_updates < MEMCG_CHARGE_BATCH)
			continue;

		/*
		 * If @memcg is already flush-able, increasing stats_updates is
		 * redundant. Avoid the overhead of the atomic update.
		 */
		if (!memcg_vmstats_needs_flush(statc->vmstats))
			atomic64_add(stats_updates,
				     &statc->vmstats->stats_updates);
		WRITE_ONCE(statc->stats_updates, 0);
	}
}
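
/*
 * A worked example, assuming MEMCG_CHARGE_BATCH is 64 and 8 CPUs are
 * online: each percpu counter batches up to 64 page-sized update events
 * before propagating them into vmstats->stats_updates, and a reader-side
 * flush is only triggered once a subtree has accumulated more than
 * 64 * 8 = 512 such events since the last flush.
 */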
static void do_flush_stats(struct mem_cgroup *memcg)
{
	if (mem_cgroup_is_root(memcg))
		WRITE_ONCE(flush_last_time, jiffies_64);

	cgroup_rstat_flush(memcg->css.cgroup);
}
/*
 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
 * @memcg: root of the subtree to flush
 *
 * Flushing is serialized by the underlying global rstat lock. There is also a
 * minimum amount of work to be done even if there are no stat updates to flush.
 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
 * avoids unnecessary work and contention on the underlying lock.
 */
void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return;

	if (!memcg)
		memcg = root_mem_cgroup;

	if (memcg_vmstats_needs_flush(memcg->vmstats))
		do_flush_stats(memcg);
}
void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
	/* Only flush if the periodic flusher is one full cycle late */
	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
		mem_cgroup_flush_stats(memcg);
}
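
/*
 * The ratelimited variant suits frequent readers (e.g. the workingset
 * refault path) that can tolerate up to 2*FLUSH_TIME of staleness rather
 * than paying for a synchronous flush on every access.
 */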
static void flush_memcg_stats_dwork(struct work_struct *w)
{
	/*
	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
	 * in latency-sensitive paths is as cheap as possible.
	 */
	do_flush_stats(root_mem_cgroup);
	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
}
unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
		return 0;

	x = READ_ONCE(memcg->vmstats->state[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
static int memcg_page_state_unit(int item);

/*
 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
 */
static int memcg_state_val_in_pages(int idx, int val)
{
	int unit = memcg_page_state_unit(idx);

	if (!val || unit == PAGE_SIZE)
		return val;
	else
		return max(val * unit / PAGE_SIZE, 1UL);
}
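
/*
 * Example: a +512 byte update to a byte-granular item such as
 * NR_SLAB_RECLAIMABLE_B (unit == 1) is normalized to one page-sized
 * event, so sub-page churn still advances the flush threshold.
 */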
/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
		       int val)
{
	int i = memcg_stats_index(idx);

	if (mem_cgroup_disabled())
		return;

	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
		return;

	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
}
/* idx can be of type enum memcg_stat_item or node_stat_item. */
unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
{
	long x;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
		return 0;

	x = READ_ONCE(memcg->vmstats->state_local[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
				     enum node_stat_item idx,
				     int val)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	/*
	 * Callers from rmap rely on disabled preemption because they never
	 * update their counters from interrupt context. For the counters
	 * below we check that the update is never performed from an
	 * interrupt context, while other callers need to have interrupts
	 * disabled.
	 */
	__memcg_stats_lock();
	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
		switch (idx) {
		case NR_ANON_MAPPED:
		case NR_FILE_MAPPED:
		case NR_ANON_THPS:
			WARN_ON_ONCE(!in_task());
			break;
		default:
			VM_WARN_ON_IRQS_ENABLED();
		}
	}

	/* Update memcg */
	__this_cpu_add(memcg->vmstats_percpu->state[i], val);

	/* Update lruvec */
	__this_cpu_add(pn->lruvec_stats_percpu->state[i], val);

	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
	memcg_stats_unlock();
}
/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* Update memcg and lruvec */
	if (!mem_cgroup_disabled())
		__mod_memcg_lruvec_state(lruvec, idx, val);
}
void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
			     int val)
{
	struct mem_cgroup *memcg;
	pg_data_t *pgdat = folio_pgdat(folio);
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = folio_memcg(folio);
	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!memcg) {
		rcu_read_unlock();
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
	rcu_read_unlock();
}
EXPORT_SYMBOL(__lruvec_stat_mod_folio);
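
/*
 * Typical use (sketch): a path dirtying a folio updates the node, memcg
 * and lruvec counters in one call:
 *
 *	__lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, folio_nr_pages(folio));
 */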
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_slab_obj(p);

	/*
	 * Untracked pages have no memcg, no lruvec. Update only the
	 * node. If we reparent the slab objects to the root memcg,
	 * when we free the slab object, we need to update the per-memcg
	 * vmstats to keep it correct for the root memcg.
	 */
	if (!memcg) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}
/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	int i = memcg_events_index(idx);

	if (mem_cgroup_disabled())
		return;

	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
		return;

	memcg_stats_lock();
	__this_cpu_add(memcg->vmstats_percpu->events[i], count);
	memcg_rstat_updated(memcg, count);
	memcg_stats_unlock();
}
unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	int i = memcg_events_index(event);

	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, event))
		return 0;

	return READ_ONCE(memcg->vmstats->events[i]);
}

unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	int i = memcg_events_index(event);

	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, event))
		return 0;

	return READ_ONCE(memcg->vmstats->events_local[i]);
}
void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages)
{
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}
bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
		return true;
	}
	return false;
}
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);
static __always_inline struct mem_cgroup *active_memcg(void)
{
	if (!in_task())
		return this_cpu_read(int_active_memcg);
	else
		return current->active_memcg;
}
/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. If mm
 * is NULL, then the memcg is chosen as follows:
 * 1) The active memcg, if set.
 * 2) current->mm->memcg, if available
 * 3) root memcg
 * If mem_cgroup is disabled, NULL is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	/*
	 * Page cache insertions can happen without an
	 * actual mm context, e.g. during disk probing
	 * on boot, loopback IO, acct() writes etc.
	 *
	 * No need to css_get on root memcg as the reference
	 * counting is disabled on the root level in the
	 * cgroup core. See CSS_NO_REF.
	 */
	if (unlikely(!mm)) {
		memcg = active_memcg();
		if (unlikely(memcg)) {
			/* remote memcg must hold a ref */
			css_get(&memcg->css);
			return memcg;
		}
		mm = current->mm;
		if (unlikely(!mm))
			return root_mem_cgroup;
	}

	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			memcg = root_mem_cgroup;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);
/**
 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
 */
struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

again:
	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (!css_tryget(&memcg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();
	return memcg;
}
/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = root->nodeinfo[reclaim->pgdat->node_id];
		iter = &mz->iter;

		/*
		 * On start, join the current reclaim iteration cycle.
		 * Exit when a concurrent walker completes it.
		 */
		if (!prev)
			reclaim->generation = iter->generation;
		else if (reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	} else if (prev) {
		pos = prev;
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		if (css == &root->css || css_tryget(css)) {
			memcg = mem_cgroup_from_css(css);
			break;
		}
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
	}

out_unlock:
	rcu_read_unlock();
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}
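
/*
 * A full walk with mem_cgroup_iter() follows this pattern (it is what the
 * for_each_mem_cgroup_tree() helper used below expands to):
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter != NULL;
 *	     iter = mem_cgroup_iter(root, iter, NULL))
 *		...
 *
 * Breaking out early requires mem_cgroup_iter_break(root, iter) to drop
 * the reference held on the last returned memcg.
 */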
/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}
static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					   struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = from->nodeinfo[nid];
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from cgroup root separately.
	 */
	if (!mem_cgroup_is_root(last))
		__invalidate_reclaim_iterators(root_mem_cgroup,
					       dead_memcg);
}
/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop. Otherwise, it will iterate
 * over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			   int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(mem_cgroup_is_root(memcg));

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
}
#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = folio_memcg(folio);

	if (!memcg)
		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
	else
		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
}
#endif
/**
 * folio_lruvec_lock - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio_memcg_lock()
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held.
 */
struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock(&lruvec->lru_lock);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio_memcg_lock()
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock_irq(&lruvec->lru_lock);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 * @flags: Pointer to irqsave flags.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio_memcg_lock()
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock_irqsave(&lruvec->lru_lock, *flags);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}
/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}
/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count < limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}
struct memory_stat {
	const char *name;
	unsigned int idx;
};

static const struct memory_stat memory_stats[] = {
	{ "anon", NR_ANON_MAPPED },
	{ "file", NR_FILE_PAGES },
	{ "kernel", MEMCG_KMEM },
	{ "kernel_stack", NR_KERNEL_STACK_KB },
	{ "pagetables", NR_PAGETABLE },
	{ "sec_pagetables", NR_SECONDARY_PAGETABLE },
	{ "percpu", MEMCG_PERCPU_B },
	{ "sock", MEMCG_SOCK },
	{ "vmalloc", MEMCG_VMALLOC },
	{ "shmem", NR_SHMEM },
#ifdef CONFIG_ZSWAP
	{ "zswap", MEMCG_ZSWAP_B },
	{ "zswapped", MEMCG_ZSWAPPED },
#endif
	{ "file_mapped", NR_FILE_MAPPED },
	{ "file_dirty", NR_FILE_DIRTY },
	{ "file_writeback", NR_WRITEBACK },
#ifdef CONFIG_SWAP
	{ "swapcached", NR_SWAPCACHE },
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{ "anon_thp", NR_ANON_THPS },
	{ "file_thp", NR_FILE_THPS },
	{ "shmem_thp", NR_SHMEM_THPS },
#endif
	{ "inactive_anon", NR_INACTIVE_ANON },
	{ "active_anon", NR_ACTIVE_ANON },
	{ "inactive_file", NR_INACTIVE_FILE },
	{ "active_file", NR_ACTIVE_FILE },
	{ "unevictable", NR_UNEVICTABLE },
	{ "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
	{ "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },

	/* The memory events */
	{ "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
	{ "workingset_refault_file", WORKINGSET_REFAULT_FILE },
	{ "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
	{ "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
	{ "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
	{ "workingset_restore_file", WORKINGSET_RESTORE_FILE },
	{ "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
};
/* The actual unit of the state item, not the same as the output unit */
static int memcg_page_state_unit(int item)
{
	switch (item) {
	case MEMCG_PERCPU_B:
	case MEMCG_ZSWAP_B:
	case NR_SLAB_RECLAIMABLE_B:
	case NR_SLAB_UNRECLAIMABLE_B:
		return 1;
	case NR_KERNEL_STACK_KB:
		return SZ_1K;
	default:
		return PAGE_SIZE;
	}
}

/* Translate stat items to the correct unit for memory.stat output */
static int memcg_page_state_output_unit(int item)
{
	/*
	 * Workingset state is actually in pages, but we export it to userspace
	 * as a scalar count of events, so special case it here.
	 */
	switch (item) {
	case WORKINGSET_REFAULT_ANON:
	case WORKINGSET_REFAULT_FILE:
	case WORKINGSET_ACTIVATE_ANON:
	case WORKINGSET_ACTIVATE_FILE:
	case WORKINGSET_RESTORE_ANON:
	case WORKINGSET_RESTORE_FILE:
	case WORKINGSET_NODERECLAIM:
		return 1;
	default:
		return memcg_page_state_unit(item);
	}
}
unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
{
	return memcg_page_state(memcg, item) *
		memcg_page_state_output_unit(item);
}

unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
{
	return memcg_page_state_local(memcg, item) *
		memcg_page_state_output_unit(item);
}
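
/*
 * Example: NR_KERNEL_STACK_KB is tracked in kilobytes, so its memory.stat
 * output is scaled by SZ_1K; byte-granular items such as MEMCG_ZSWAP_B are
 * emitted unscaled; the workingset items are exported as raw event counts;
 * everything else is scaled by PAGE_SIZE to report bytes.
 */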
static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	int i;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */
	mem_cgroup_flush_stats(memcg);

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		u64 size;

		size = memcg_page_state_output(memcg, memory_stats[i].idx);
		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);

		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
			size += memcg_page_state_output(memcg,
							NR_SLAB_RECLAIMABLE_B);
			seq_buf_printf(s, "slab %llu\n", size);
		}
	}

	/* Accumulated memory events */
	seq_buf_printf(s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT) +
		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
	seq_buf_printf(s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT) +
		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));

	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
		if (memcg_vm_event_stat[i] == PGPGIN ||
		    memcg_vm_event_stat[i] == PGPGOUT)
			continue;

		seq_buf_printf(s, "%s %lu\n",
			       vm_event_name(memcg_vm_event_stat[i]),
			       memcg_events(memcg, memcg_vm_event_stat[i]));
	}
}
static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg_stat_format(memcg, s);
	else
		memcg1_stat_format(memcg, s);
	if (seq_buf_has_overflowed(s))
		pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
}
/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}
/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	/* Use static buffer, for the caller is holding oom_lock. */
	static char buf[PAGE_SIZE];
	struct seq_buf s;

	lockdep_assert_held(&oom_lock);

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
#ifdef CONFIG_MEMCG_V1
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}
#endif

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	seq_buf_init(&s, buf, sizeof(buf));
	memory_stat_format(memcg, &s);
	seq_buf_do_printk(&s, KERN_INFO);
}
/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max = READ_ONCE(memcg->memory.max);

	if (do_memsw_account()) {
		if (mem_cgroup_swappiness(memcg)) {
			/* Calculate swap excess capacity from memsw limit */
			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;

			max += min(swap, (unsigned long)total_swap_pages);
		}
	} else {
		if (mem_cgroup_swappiness(memcg))
			max += min(READ_ONCE(memcg->swap.max),
				   (unsigned long)total_swap_pages);
	}
	return max;
}
unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}
static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret = true;

	if (mutex_lock_killable(&oom_lock))
		return true;

	if (mem_cgroup_margin(memcg) >= (1 << order))
		goto unlock;

	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
	 */
	ret = task_is_dying() || out_of_memory(&oc);

unlock:
	mutex_unlock(&oom_lock);
	return ret;
}
/*
 * Returns true if successfully killed one or more processes. Though in some
 * corner cases it can return true even without killing any process.
 */
static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	bool locked, ret;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return false;

	memcg_memory_event(memcg, MEMCG_OOM);

	if (!memcg1_oom_prepare(memcg, &locked))
		return false;

	ret = mem_cgroup_out_of_memory(memcg, mask, order);

	memcg1_oom_finish(memcg, locked);

	return ret;
}
/**
 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
 * @victim: task to be killed by the OOM killer
 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
 *
 * Returns a pointer to a memory cgroup, which has to be cleaned up
 * by killing all belonging OOM-killable tasks.
 *
 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
 */
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain)
{
	struct mem_cgroup *oom_group = NULL;
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return NULL;

	if (!oom_domain)
		oom_domain = root_mem_cgroup;

	rcu_read_lock();

	memcg = mem_cgroup_from_task(victim);
	if (mem_cgroup_is_root(memcg))
		goto out;

	/*
	 * If the victim task has been asynchronously moved to a different
	 * memory cgroup, we might end up killing tasks outside oom_domain.
	 * In this case it's better to ignore memory.group.oom.
	 */
	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
		goto out;

	/*
	 * Traverse the memory cgroup hierarchy from the victim task's
	 * cgroup up to the OOMing cgroup (or root) to find the
	 * highest-level memory cgroup with oom.group set.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		if (READ_ONCE(memcg->oom_group))
			oom_group = memcg;

		if (memcg == oom_domain)
			break;
	}

	if (oom_group)
		css_get(&oom_group->css);
out:
	rcu_read_unlock();

	return oom_group;
}
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
	pr_info("Tasks in ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(" are going to be killed due to memory.oom.group set\n");
}
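
/*
 * Per-CPU charge cache: a batch of pre-charged pages (and, for kernel
 * objects, bytes) is parked here so that repeated charges by the same
 * memcg on the same CPU can skip the page_counter atomics.
 */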
struct memcg_stock_pcp {
	local_lock_t stock_lock;
	struct mem_cgroup *cached; /* this is never the root cgroup */
	unsigned int nr_pages;

	struct obj_cgroup *cached_objcg;
	struct pglist_data *cached_pgdat;
	unsigned int nr_bytes;
	int nr_slab_reclaimable_b;
	int nr_slab_unreclaimable_b;

	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
};
static DEFINE_MUTEX(percpu_charge_mutex);

static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
				     struct mem_cgroup *root_memcg);
/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock. Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned int stock_pages;
	unsigned long flags;
	bool ret = false;

	if (nr_pages > MEMCG_CHARGE_BATCH)
		return ret;

	local_lock_irqsave(&memcg_stock.stock_lock, flags);

	stock = this_cpu_ptr(&memcg_stock);
	stock_pages = READ_ONCE(stock->nr_pages);
	if (memcg == READ_ONCE(stock->cached) && stock_pages >= nr_pages) {
		WRITE_ONCE(stock->nr_pages, stock_pages - nr_pages);
		ret = true;
	}

	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);

	return ret;
}
/*
 * Return stocked charges to the counters and reset the cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	unsigned int stock_pages = READ_ONCE(stock->nr_pages);
	struct mem_cgroup *old = READ_ONCE(stock->cached);

	if (!old)
		return;

	if (stock_pages) {
		page_counter_uncharge(&old->memory, stock_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock_pages);

		WRITE_ONCE(stock->nr_pages, 0);
	}

	css_put(&old->css);
	WRITE_ONCE(stock->cached, NULL);
}
static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	struct obj_cgroup *old = NULL;
	unsigned long flags;

	/*
	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
	 * drain_stock races is that we always operate on local CPU stock
	 * here with IRQ disabled
	 */
	local_lock_irqsave(&memcg_stock.stock_lock, flags);

	stock = this_cpu_ptr(&memcg_stock);
	old = drain_obj_stock(stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
	obj_cgroup_put(old);
}
/*
 * Cache charges (nr_pages) in the local per-CPU area.
 * They will be consumed by consume_stock() later.
 */
static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned int stock_pages;

	stock = this_cpu_ptr(&memcg_stock);
	if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
		drain_stock(stock);
		css_get(&memcg->css);
		WRITE_ONCE(stock->cached, memcg);
	}
	stock_pages = READ_ONCE(stock->nr_pages) + nr_pages;
	WRITE_ONCE(stock->nr_pages, stock_pages);

	if (stock_pages > MEMCG_CHARGE_BATCH)
		drain_stock(stock);
}

static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	unsigned long flags;

	local_lock_irqsave(&memcg_stock.stock_lock, flags);
	__refill_stock(memcg, nr_pages);
	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
}
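
/*
 * Sketch of how the stock pairs up in the charge path (see try_charge()):
 * consume_stock() is attempted first; if it fails, the page_counter is
 * charged in MEMCG_CHARGE_BATCH units and the surplus beyond the requested
 * nr_pages is handed back to the local cache via refill_stock().
 */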
/*
 * Drain all per-CPU charge caches for the given root_memcg and the
 * subtree of the hierarchy under it.
 */
void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid running more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/*
	 * Notify other cpus that system-wide "drain" is running
	 * We do not care about races with the cpu hotplug because cpu down
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
	 */
	migrate_disable();
	curcpu = smp_processor_id();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;
		bool flush = false;

		rcu_read_lock();
		memcg = READ_ONCE(stock->cached);
		if (memcg && READ_ONCE(stock->nr_pages) &&
		    mem_cgroup_is_descendant(memcg, root_memcg))
			flush = true;
		else if (obj_stock_flush_required(stock, root_memcg))
			flush = true;
		rcu_read_unlock();

		if (flush &&
		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else if (!cpu_is_isolated(cpu))
				schedule_work_on(cpu, &stock->work);
		}
	}
	migrate_enable();
	mutex_unlock(&percpu_charge_mutex);
}
static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
	struct memcg_stock_pcp *stock;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);

	return 0;
}
static unsigned long reclaim_high(struct mem_cgroup *memcg,
				  unsigned int nr_pages,
				  gfp_t gfp_mask)
{
	unsigned long nr_reclaimed = 0;

	do {
		unsigned long pflags;

		if (page_counter_read(&memcg->memory) <=
		    READ_ONCE(memcg->memory.high))
			continue;

		memcg_memory_event(memcg, MEMCG_HIGH);

		psi_memstall_enter(&pflags);
		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
							     gfp_mask,
							     MEMCG_RECLAIM_MAY_SWAP,
							     NULL);
		psi_memstall_leave(&pflags);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return nr_reclaimed;
}
static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
}
/*
 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
 * enough to still cause a significant slowdown in most cases, while still
 * allowing diagnostics and tracing to proceed without becoming stuck.
 */
#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)

/*
 * When calculating the delay, we use these either side of the exponentiation to
 * maintain precision and scale to a reasonable number of jiffies (see the table
 * below).
 *
 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
 *   overage ratio to a delay.
 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
 *   proposed penalty in order to reduce to a reasonable number of jiffies, and
 *   to produce a reasonable delay curve.
 *
 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
 * reasonable delay curve compared to precision-adjusted overage, not
 * penalising heavily at first, but still making sure that growth beyond the
 * limit penalises misbehaving cgroups by slowing them down exponentially. For
 * example, with a high of 100 megabytes:
 *
 *  +-------+------------------------+
 *  | usage | time to allocate in ms |
 *  +-------+------------------------+
 *  | 100M  |                      0 |
 *  | 101M  |                      6 |
 *  | 102M  |                     25 |
 *  | 103M  |                     57 |
 *  | 104M  |                    102 |
 *  | 105M  |                    159 |
 *  | 106M  |                    230 |
 *  | 107M  |                    313 |
 *  | 108M  |                    409 |
 *  | 109M  |                    518 |
 *  | 110M  |                    639 |
 *  | 111M  |                    774 |
 *  | 112M  |                    921 |
 *  | 113M  |                   1081 |
 *  | 114M  |                   1254 |
 *  | 115M  |                   1439 |
 *  | 116M  |                   1638 |
 *  | 117M  |                   1849 |
 *  | 118M  |                   2000 |
 *  | 119M  |                   2000 |
 *  | 120M  |                   2000 |
 *  +-------+------------------------+
 */
#define MEMCG_DELAY_PRECISION_SHIFT 20
#define MEMCG_DELAY_SCALING_SHIFT 14
static u64 calculate_overage(unsigned long usage, unsigned long high)
{
	u64 overage;

	if (usage <= high)
		return 0;

	/*
	 * Prevent division by 0 in overage calculation by acting as if
	 * it was a threshold of 1 page
	 */
	high = max(high, 1UL);

	overage = usage - high;
	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
	return div64_u64(overage, high);
}
static u64 mem_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->memory),
					    READ_ONCE(memcg->memory.high));
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

static u64 swap_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->swap),
					    READ_ONCE(memcg->swap.high));
		if (overage)
			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}
2023 * Get the number of jiffies that we should penalise a mischievous cgroup which
2024 * is exceeding its memory.high by checking both it and its ancestors.
2026 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2027 unsigned int nr_pages,
2028 u64 max_overage)
2030 unsigned long penalty_jiffies;
2032 if (!max_overage)
2033 return 0;
2036 * We use overage compared to memory.high to calculate the number of
2037 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2038 * fairly lenient on small overages, and increasingly harsh when the
2039 * memcg in question makes it clear that it has no intention of stopping
2040 * its crazy behaviour, so we exponentially increase the delay based on
2041 * overage amount.
2043 penalty_jiffies = max_overage * max_overage * HZ;
2044 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2045 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2048 * Factor in the task's own contribution to the overage, such that four
2049 * N-sized allocations are throttled approximately the same as one
2050 * 4N-sized allocation.
2052 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2053 * larger the current charge batch is than that.
2055 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
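/*
 * Worked example for the nr_pages scaling above (illustrative, assuming
 * MEMCG_CHARGE_BATCH == 64): if the overage-based penalty comes to 100
 * jiffies, a 16-page charge sleeps 100 * 16 / 64 = 25 jiffies, so four
 * such charges add up to roughly the same delay as a single 64-page charge.
 */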
2059 * Reclaims memory over the high limit. Called directly from
2060 * try_charge() (context permitting), as well as from the userland
2061 * return path where reclaim is always able to block.
2063 void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2065 unsigned long penalty_jiffies;
2066 unsigned long pflags;
2067 unsigned long nr_reclaimed;
2068 unsigned int nr_pages = current->memcg_nr_pages_over_high;
2069 int nr_retries = MAX_RECLAIM_RETRIES;
2070 struct mem_cgroup *memcg;
2071 bool in_retry = false;
2073 if (likely(!nr_pages))
2074 return;
2076 memcg = get_mem_cgroup_from_mm(current->mm);
2077 current->memcg_nr_pages_over_high = 0;
2079 retry_reclaim:
2081 * Bail if the task is already exiting. Unlike memory.max,
2082 * memory.high enforcement isn't as strict, and there is no
2083 * OOM killer involved, which means the excess could already
2084 * be much bigger (and still growing) than it could for
2085 * memory.max; the dying task could get stuck in fruitless
2086 * reclaim for a long time, which isn't desirable.
2088 if (task_is_dying())
2089 goto out;
2092 * The allocating task should reclaim at least the batch size, but for
2093 * subsequent retries we only want to do what's necessary to prevent oom
2094 * or breaching resource isolation.
2096 * This is distinct from memory.max or page allocator behaviour because
2097 * memory.high is currently batched, whereas memory.max and the page
2098 * allocator run every time an allocation is made.
2100 nr_reclaimed = reclaim_high(memcg,
2101 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2102 gfp_mask);
2105 * memory.high is breached and reclaim is unable to keep up. Throttle
2106 * allocators proactively to slow down excessive growth.
2108 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2109 mem_find_max_overage(memcg));
2111 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2112 swap_find_max_overage(memcg));
2115 * Clamp the max delay per usermode return so as to still keep the
2116 * application moving forwards and also permit diagnostics, albeit
2117 * extremely slowly.
2119 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2122 * Don't sleep if the number of jiffies this memcg owes us is so low
2123 * that it's not even worth doing, in an attempt to be nice to those who
2124 * go only a small amount over their memory.high value and maybe haven't
2125 * been aggressively reclaimed enough yet.
2127 if (penalty_jiffies <= HZ / 100)
2128 goto out;
2131 * If reclaim is making forward progress but we're still over
2132 * memory.high, we want to encourage that rather than doing allocator
2133 * throttling.
2135 if (nr_reclaimed || nr_retries--) {
2136 in_retry = true;
2137 goto retry_reclaim;
2141 * Reclaim didn't manage to push usage below the limit, slow
2142 * this allocating task down.
2144 * If we exit early, we're guaranteed to die (since
2145 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2146 * need to account for any ill-begotten jiffies to pay them off later.
2148 psi_memstall_enter(&pflags);
2149 schedule_timeout_killable(penalty_jiffies);
2150 psi_memstall_leave(&pflags);
2152 out:
2153 css_put(&memcg->css);
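/*
 * Userspace-visible effect (illustrative sketch, cgroup v2): after e.g.
 * "echo 100M > memory.high", a task that keeps allocating past the high
 * mark first gets direct reclaim on the userland return path and then,
 * once reclaim can't keep up, per-batch sleeps of up to
 * MEMCG_MAX_HIGH_DELAY_JIFFIES; the "high" count in memory.events and the
 * cgroup's memory.pressure climb, but no OOM killer is invoked.
 */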
2156 int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2157 unsigned int nr_pages)
2159 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2160 int nr_retries = MAX_RECLAIM_RETRIES;
2161 struct mem_cgroup *mem_over_limit;
2162 struct page_counter *counter;
2163 unsigned long nr_reclaimed;
2164 bool passed_oom = false;
2165 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2166 bool drained = false;
2167 bool raised_max_event = false;
2168 unsigned long pflags;
2170 retry:
2171 if (consume_stock(memcg, nr_pages))
2172 return 0;
2174 if (!do_memsw_account() ||
2175 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2176 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2177 goto done_restock;
2178 if (do_memsw_account())
2179 page_counter_uncharge(&memcg->memsw, batch);
2180 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2181 } else {
2182 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2183 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2186 if (batch > nr_pages) {
2187 batch = nr_pages;
2188 goto retry;
2192 * Prevent unbounded recursion when reclaim operations need to
2193 * allocate memory. This might exceed the limits temporarily,
2194 * but we prefer facilitating memory reclaim and getting back
2195 * under the limit over triggering OOM kills in these cases.
2197 if (unlikely(current->flags & PF_MEMALLOC))
2198 goto force;
2200 if (unlikely(task_in_memcg_oom(current)))
2201 goto nomem;
2203 if (!gfpflags_allow_blocking(gfp_mask))
2204 goto nomem;
2206 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2207 raised_max_event = true;
2209 psi_memstall_enter(&pflags);
2210 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2211 gfp_mask, reclaim_options, NULL);
2212 psi_memstall_leave(&pflags);
2214 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2215 goto retry;
2217 if (!drained) {
2218 drain_all_stock(mem_over_limit);
2219 drained = true;
2220 goto retry;
2223 if (gfp_mask & __GFP_NORETRY)
2224 goto nomem;
2226 * Even though the limit is exceeded at this point, reclaim
2227 * may have been able to free some pages. Retry the charge
2228 * before killing the task.
2230 * Only for regular pages, though: huge pages are rather
2231 * unlikely to succeed so close to the limit, and we fall back
2232 * to regular pages anyway in case of failure.
2234 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2235 goto retry;
2237 * At task move, charge accounts can be double-counted. So, it's
2238 * better to wait until the end of task_move if one is in progress.
2240 if (memcg1_wait_acct_move(mem_over_limit))
2241 goto retry;
2243 if (nr_retries--)
2244 goto retry;
2246 if (gfp_mask & __GFP_RETRY_MAYFAIL)
2247 goto nomem;
2249 /* Avoid endless loop for tasks bypassed by the oom killer */
2250 if (passed_oom && task_is_dying())
2251 goto nomem;
2254 * Keep retrying as long as the memcg oom killer is able to make
2255 * forward progress, or bypass the charge if the oom killer
2256 * couldn't make any progress.
2258 if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2259 get_order(nr_pages * PAGE_SIZE))) {
2260 passed_oom = true;
2261 nr_retries = MAX_RECLAIM_RETRIES;
2262 goto retry;
2264 nomem:
2266 * Memcg doesn't have a dedicated reserve for atomic
2267 * allocations. But like the global atomic pool, we need to
2268 * put the burden of reclaim on regular allocation requests
2269 * and let these go through as privileged allocations.
2271 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2272 return -ENOMEM;
2273 force:
2275 * If the allocation has to be enforced, don't forget to raise
2276 * a MEMCG_MAX event.
2278 if (!raised_max_event)
2279 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2282 * The allocation either can't fail or will lead to more memory
2283 * being freed very soon. Allow memory usage to go over the limit
2284 * temporarily by force charging it.
2286 page_counter_charge(&memcg->memory, nr_pages);
2287 if (do_memsw_account())
2288 page_counter_charge(&memcg->memsw, nr_pages);
2290 return 0;
2292 done_restock:
2293 if (batch > nr_pages)
2294 refill_stock(memcg, batch - nr_pages);
2297 * If the hierarchy is above the normal consumption range, schedule
2298 * reclaim on returning to userland. We can perform reclaim here
2299 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2300 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2301 * not recorded as it most likely matches current's and won't
2302 * change in the meantime. As high limit is checked again before
2303 * reclaim, the cost of mismatch is negligible.
2305 do {
2306 bool mem_high, swap_high;
2308 mem_high = page_counter_read(&memcg->memory) >
2309 READ_ONCE(memcg->memory.high);
2310 swap_high = page_counter_read(&memcg->swap) >
2311 READ_ONCE(memcg->swap.high);
2313 /* Don't bother a random interrupted task */
2314 if (!in_task()) {
2315 if (mem_high) {
2316 schedule_work(&memcg->high_work);
2317 break;
2319 continue;
2322 if (mem_high || swap_high) {
2324 * The allocating tasks in this cgroup will need to do
2325 * reclaim or be throttled to prevent further growth
2326 * of the memory or swap footprints.
2328 * Target some best-effort fairness between the tasks,
2329 * and distribute reclaim work and delay penalties
2330 * based on how much each task is actually allocating.
2332 current->memcg_nr_pages_over_high += batch;
2333 set_notify_resume(current);
2334 break;
2336 } while ((memcg = parent_mem_cgroup(memcg)));
2339 * Reclaim is set up above to be called from the userland
2340 * return path. But also attempt synchronous reclaim to avoid
2341 * excessive overrun while the task is still inside the
2342 * kernel. If this is successful, the return path will see it
2343 * when it rechecks the overage and simply bail out.
2345 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2346 !(current->flags & PF_MEMALLOC) &&
2347 gfpflags_allow_blocking(gfp_mask))
2348 mem_cgroup_handle_over_high(gfp_mask);
2349 return 0;
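/*
 * Illustrative walk-through of the batching above (assuming
 * MEMCG_CHARGE_BATCH == 64): the first 1-page charge misses
 * consume_stock(), charges batch = 64 pages to the page counters and
 * parks the 63-page surplus in the per-cpu stock at done_restock; the
 * next 63 single-page charges are then served from the stock without
 * touching the page counters at all.
 */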
2353 * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
2354 * @memcg: memcg previously charged.
2355 * @nr_pages: number of pages previously charged.
2357 void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2359 if (mem_cgroup_is_root(memcg))
2360 return;
2362 page_counter_uncharge(&memcg->memory, nr_pages);
2363 if (do_memsw_account())
2364 page_counter_uncharge(&memcg->memsw, nr_pages);
2367 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2369 VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2371 * Any of the following ensures the folio's memcg stability:
2373 * - the page lock
2374 * - LRU isolation
2375 * - folio_memcg_lock()
2376 * - exclusive reference
2377 * - mem_cgroup_trylock_pages()
2379 folio->memcg_data = (unsigned long)memcg;
2383 * mem_cgroup_commit_charge - commit a previously successful try_charge().
2384 * @folio: folio to commit the charge to.
2385 * @memcg: memcg previously charged.
2387 void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2389 css_get(&memcg->css);
2390 commit_charge(folio, memcg);
2392 local_irq_disable();
2393 mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
2394 memcg1_check_events(memcg, folio_nid(folio));
2395 local_irq_enable();
2398 static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
2399 struct pglist_data *pgdat,
2400 enum node_stat_item idx, int nr)
2402 struct mem_cgroup *memcg;
2403 struct lruvec *lruvec;
2405 rcu_read_lock();
2406 memcg = obj_cgroup_memcg(objcg);
2407 lruvec = mem_cgroup_lruvec(memcg, pgdat);
2408 __mod_memcg_lruvec_state(lruvec, idx, nr);
2409 rcu_read_unlock();
2412 static __always_inline
2413 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
2416 * Slab objects are accounted individually, not per-page.
2417 * Memcg membership data for each individual object is saved in
2418 * slab->obj_exts.
2420 if (folio_test_slab(folio)) {
2421 struct slabobj_ext *obj_exts;
2422 struct slab *slab;
2423 unsigned int off;
2425 slab = folio_slab(folio);
2426 obj_exts = slab_obj_exts(slab);
2427 if (!obj_exts)
2428 return NULL;
2430 off = obj_to_index(slab->slab_cache, slab, p);
2431 if (obj_exts[off].objcg)
2432 return obj_cgroup_memcg(obj_exts[off].objcg);
2434 return NULL;
2438 * folio_memcg_check() is used here because in theory we can encounter
2439 * a folio where the slab flag has been cleared already, but
2440 * slab->obj_exts has not been freed yet.
2441 * folio_memcg_check() will guarantee that a proper memory
2442 * cgroup pointer or NULL will be returned.
2444 return folio_memcg_check(folio);
2448 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2450 * A passed kernel object can be a slab object, vmalloc object or a generic
2451 * kernel page, so different mechanisms for getting the memory cgroup pointer
2452 * should be used.
2454 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
2455 * cannot know for sure how the kernel object is implemented.
2456 * mem_cgroup_from_obj() can be safely used in such cases.
2458 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2459 * cgroup_mutex, etc.
2461 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2463 struct folio *folio;
2465 if (mem_cgroup_disabled())
2466 return NULL;
2468 if (unlikely(is_vmalloc_addr(p)))
2469 folio = page_folio(vmalloc_to_page(p));
2470 else
2471 folio = virt_to_folio(p);
2473 return mem_cgroup_from_obj_folio(folio, p);
2477 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2478 * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
2479 * allocated using vmalloc().
2481 * A passed kernel object must be a slab object or a generic kernel page.
2483 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2484 * cgroup_mutex, etc.
2486 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
2488 if (mem_cgroup_disabled())
2489 return NULL;
2491 return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
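/*
 * Illustrative usage (sketch): the caller provides the lifetime
 * guarantee, e.g. via RCU:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_slab_obj(ptr);
 *	if (memcg)
 *		id = mem_cgroup_id(memcg);
 *	rcu_read_unlock();
 */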
2494 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
2496 struct obj_cgroup *objcg = NULL;
2498 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2499 objcg = rcu_dereference(memcg->objcg);
2500 if (likely(objcg && obj_cgroup_tryget(objcg)))
2501 break;
2502 objcg = NULL;
2504 return objcg;
2507 static struct obj_cgroup *current_objcg_update(void)
2509 struct mem_cgroup *memcg;
2510 struct obj_cgroup *old, *objcg = NULL;
2512 do {
2513 /* Atomically drop the update bit. */
2514 old = xchg(&current->objcg, NULL);
2515 if (old) {
2516 old = (struct obj_cgroup *)
2517 ((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
2518 obj_cgroup_put(old);
2520 old = NULL;
2523 /* If new objcg is NULL, no reason for the second atomic update. */
2524 if (!current->mm || (current->flags & PF_KTHREAD))
2525 return NULL;
2528 * Release the objcg pointer from the previous iteration,
2529 * if try_cmpxchg() below fails.
2531 if (unlikely(objcg)) {
2532 obj_cgroup_put(objcg);
2533 objcg = NULL;
2537 * Obtain the new objcg pointer. The current task can be
2538 * asynchronously moved to another memcg and the previous
2539 * memcg can be offlined. So let's get the memcg pointer
2540 * and try to get a reference to the objcg under an RCU read lock.
2543 rcu_read_lock();
2544 memcg = mem_cgroup_from_task(current);
2545 objcg = __get_obj_cgroup_from_memcg(memcg);
2546 rcu_read_unlock();
2549 * Try to set up a new objcg pointer atomically. If it
2550 * fails, it means the update flag was set concurrently, so
2551 * the whole procedure should be repeated.
2553 } while (!try_cmpxchg(&current->objcg, &old, objcg));
2555 return objcg;
2558 __always_inline struct obj_cgroup *current_obj_cgroup(void)
2560 struct mem_cgroup *memcg;
2561 struct obj_cgroup *objcg;
2563 if (in_task()) {
2564 memcg = current->active_memcg;
2565 if (unlikely(memcg))
2566 goto from_memcg;
2568 objcg = READ_ONCE(current->objcg);
2569 if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
2570 objcg = current_objcg_update();
2572 * The objcg reference is kept by the task, so it's safe
2573 * for the current task to use the objcg.
2575 return objcg;
2578 memcg = this_cpu_read(int_active_memcg);
2579 if (unlikely(memcg))
2580 goto from_memcg;
2582 return NULL;
2584 from_memcg:
2585 objcg = NULL;
2586 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2588 * Memcg pointer is protected by scope (see set_active_memcg())
2589 * and is pinning the corresponding objcg, so objcg can't go
2590 * away and can be used within the scope without any additional
2591 * protection.
2593 objcg = rcu_dereference_check(memcg->objcg, 1);
2594 if (likely(objcg))
2595 break;
2598 return objcg;
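/*
 * Illustrative scope (sketch): remote charging pins a memcg for a
 * section of code, and current_obj_cgroup() picks it up:
 *
 *	old = set_active_memcg(memcg);
 *	objcg = current_obj_cgroup();	// resolves via active_memcg
 *	...
 *	set_active_memcg(old);
 *
 * set_active_memcg() also works from interrupt context, which is why
 * the int_active_memcg per-cpu slot is consulted above when !in_task().
 */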
2601 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
2603 struct obj_cgroup *objcg;
2605 if (!memcg_kmem_online())
2606 return NULL;
2608 if (folio_memcg_kmem(folio)) {
2609 objcg = __folio_objcg(folio);
2610 obj_cgroup_get(objcg);
2611 } else {
2612 struct mem_cgroup *memcg;
2614 rcu_read_lock();
2615 memcg = __folio_memcg(folio);
2616 if (memcg)
2617 objcg = __get_obj_cgroup_from_memcg(memcg);
2618 else
2619 objcg = NULL;
2620 rcu_read_unlock();
2622 return objcg;
2626 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
2627 * @objcg: object cgroup to uncharge
2628 * @nr_pages: number of pages to uncharge
2630 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2631 unsigned int nr_pages)
2633 struct mem_cgroup *memcg;
2635 memcg = get_mem_cgroup_from_objcg(objcg);
2637 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2638 memcg1_account_kmem(memcg, -nr_pages);
2639 refill_stock(memcg, nr_pages);
2641 css_put(&memcg->css);
2645 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
2646 * @objcg: object cgroup to charge
2647 * @gfp: reclaim mode
2648 * @nr_pages: number of pages to charge
2650 * Returns 0 on success, an error code on failure.
2652 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
2653 unsigned int nr_pages)
2655 struct mem_cgroup *memcg;
2656 int ret;
2658 memcg = get_mem_cgroup_from_objcg(objcg);
2660 ret = try_charge_memcg(memcg, gfp, nr_pages);
2661 if (ret)
2662 goto out;
2664 mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
2665 memcg1_account_kmem(memcg, nr_pages);
2666 out:
2667 css_put(&memcg->css);
2669 return ret;
2673 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2674 * @page: page to charge
2675 * @gfp: reclaim mode
2676 * @order: allocation order
2678 * Returns 0 on success, an error code on failure.
2680 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2682 struct obj_cgroup *objcg;
2683 int ret = 0;
2685 objcg = current_obj_cgroup();
2686 if (objcg) {
2687 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
2688 if (!ret) {
2689 obj_cgroup_get(objcg);
2690 page->memcg_data = (unsigned long)objcg |
2691 MEMCG_DATA_KMEM;
2692 return 0;
2695 return ret;
2699 * __memcg_kmem_uncharge_page: uncharge a kmem page
2700 * @page: page to uncharge
2701 * @order: allocation order
2703 void __memcg_kmem_uncharge_page(struct page *page, int order)
2705 struct folio *folio = page_folio(page);
2706 struct obj_cgroup *objcg;
2707 unsigned int nr_pages = 1 << order;
2709 if (!folio_memcg_kmem(folio))
2710 return;
2712 objcg = __folio_objcg(folio);
2713 obj_cgroup_uncharge_pages(objcg, nr_pages);
2714 folio->memcg_data = 0;
2715 obj_cgroup_put(objcg);
2718 static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
2719 enum node_stat_item idx, int nr)
2721 struct memcg_stock_pcp *stock;
2722 struct obj_cgroup *old = NULL;
2723 unsigned long flags;
2724 int *bytes;
2726 local_lock_irqsave(&memcg_stock.stock_lock, flags);
2727 stock = this_cpu_ptr(&memcg_stock);
2730 * Save vmstat data in stock and skip vmstat array update unless
2731 * accumulating over a page of vmstat data or when pgdat or idx
2732 * changes.
2734 if (READ_ONCE(stock->cached_objcg) != objcg) {
2735 old = drain_obj_stock(stock);
2736 obj_cgroup_get(objcg);
2737 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
2738 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
2739 WRITE_ONCE(stock->cached_objcg, objcg);
2740 stock->cached_pgdat = pgdat;
2741 } else if (stock->cached_pgdat != pgdat) {
2742 /* Flush the existing cached vmstat data */
2743 struct pglist_data *oldpg = stock->cached_pgdat;
2745 if (stock->nr_slab_reclaimable_b) {
2746 __mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
2747 stock->nr_slab_reclaimable_b);
2748 stock->nr_slab_reclaimable_b = 0;
2750 if (stock->nr_slab_unreclaimable_b) {
2751 __mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
2752 stock->nr_slab_unreclaimable_b);
2753 stock->nr_slab_unreclaimable_b = 0;
2755 stock->cached_pgdat = pgdat;
2758 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
2759 : &stock->nr_slab_unreclaimable_b;
2761 * Even for large objects >= PAGE_SIZE, the vmstat data will still be
2762 * cached locally at least once before being pushed out.
2764 if (!*bytes) {
2765 *bytes = nr;
2766 nr = 0;
2767 } else {
2768 *bytes += nr;
2769 if (abs(*bytes) > PAGE_SIZE) {
2770 nr = *bytes;
2771 *bytes = 0;
2772 } else {
2773 nr = 0;
2776 if (nr)
2777 __mod_objcg_mlstate(objcg, pgdat, idx, nr);
2779 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2780 obj_cgroup_put(old);
2783 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
2785 struct memcg_stock_pcp *stock;
2786 unsigned long flags;
2787 bool ret = false;
2789 local_lock_irqsave(&memcg_stock.stock_lock, flags);
2791 stock = this_cpu_ptr(&memcg_stock);
2792 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
2793 stock->nr_bytes -= nr_bytes;
2794 ret = true;
2797 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2799 return ret;
2802 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2804 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
2806 if (!old)
2807 return NULL;
2809 if (stock->nr_bytes) {
2810 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2811 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
2813 if (nr_pages) {
2814 struct mem_cgroup *memcg;
2816 memcg = get_mem_cgroup_from_objcg(old);
2818 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2819 memcg1_account_kmem(memcg, -nr_pages);
2820 __refill_stock(memcg, nr_pages);
2822 css_put(&memcg->css);
2826 * The leftover is flushed to the centralized per-memcg value.
2827 * On the next attempt to refill obj stock it will be moved
2828 * to a per-cpu stock (probably on another CPU), see
2829 * refill_obj_stock().
2831 * How often it's flushed is a trade-off between the memory
2832 * limit enforcement accuracy and potential CPU contention,
2833 * so it might be changed in the future.
2835 atomic_add(nr_bytes, &old->nr_charged_bytes);
2836 stock->nr_bytes = 0;
2840 * Flush the vmstat data in current stock
2842 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
2843 if (stock->nr_slab_reclaimable_b) {
2844 __mod_objcg_mlstate(old, stock->cached_pgdat,
2845 NR_SLAB_RECLAIMABLE_B,
2846 stock->nr_slab_reclaimable_b);
2847 stock->nr_slab_reclaimable_b = 0;
2849 if (stock->nr_slab_unreclaimable_b) {
2850 __mod_objcg_mlstate(old, stock->cached_pgdat,
2851 NR_SLAB_UNRECLAIMABLE_B,
2852 stock->nr_slab_unreclaimable_b);
2853 stock->nr_slab_unreclaimable_b = 0;
2855 stock->cached_pgdat = NULL;
2858 WRITE_ONCE(stock->cached_objcg, NULL);
2860 * The `old' objcg needs to be released by the caller via
2861 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
2863 return old;
2866 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2867 struct mem_cgroup *root_memcg)
2869 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
2870 struct mem_cgroup *memcg;
2872 if (objcg) {
2873 memcg = obj_cgroup_memcg(objcg);
2874 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
2875 return true;
2878 return false;
2881 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
2882 bool allow_uncharge)
2884 struct memcg_stock_pcp *stock;
2885 struct obj_cgroup *old = NULL;
2886 unsigned long flags;
2887 unsigned int nr_pages = 0;
2889 local_lock_irqsave(&memcg_stock.stock_lock, flags);
2891 stock = this_cpu_ptr(&memcg_stock);
2892 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
2893 old = drain_obj_stock(stock);
2894 obj_cgroup_get(objcg);
2895 WRITE_ONCE(stock->cached_objcg, objcg);
2896 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
2897 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
2898 allow_uncharge = true; /* Allow uncharge when objcg changes */
2900 stock->nr_bytes += nr_bytes;
2902 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
2903 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2904 stock->nr_bytes &= (PAGE_SIZE - 1);
2907 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2908 obj_cgroup_put(old);
2910 if (nr_pages)
2911 obj_cgroup_uncharge_pages(objcg, nr_pages);
2914 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
2916 unsigned int nr_pages, nr_bytes;
2917 int ret;
2919 if (consume_obj_stock(objcg, size))
2920 return 0;
2923 * In theory, objcg->nr_charged_bytes can have enough
2924 * pre-charged bytes to satisfy the allocation. However,
2925 * flushing objcg->nr_charged_bytes requires two atomic
2926 * operations, and objcg->nr_charged_bytes can't be big.
2927 * The shared objcg->nr_charged_bytes can also become a
2928 * performance bottleneck if all tasks of the same memcg are
2929 * trying to update it. So it's better to ignore it and try to
2930 * grab some new pages. The stock's nr_bytes will be flushed to
2931 * objcg->nr_charged_bytes later on when objcg changes.
2933 * The stock's nr_bytes may contain enough pre-charged bytes
2934 * to allow one less page to be charged, but we can't rely
2935 * on the pre-charged bytes not being changed outside of
2936 * consume_obj_stock() or refill_obj_stock(). So ignore those
2937 * pre-charged bytes as well when charging pages. To avoid a
2938 * page uncharge right after a page charge, we set the
2939 * allow_uncharge flag to false when calling refill_obj_stock()
2940 * to temporarily allow the pre-charged bytes to exceed the page
2941 * size limit. The maximum reachable value of the pre-charged
2942 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
2943 * race.
2945 nr_pages = size >> PAGE_SHIFT;
2946 nr_bytes = size & (PAGE_SIZE - 1);
2948 if (nr_bytes)
2949 nr_pages += 1;
2951 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
2952 if (!ret && nr_bytes)
2953 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
2955 return ret;
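/*
 * Worked example (illustrative, 4K pages): charging size = 4296 bytes
 * gives nr_pages = 1 and nr_bytes = 200, so two whole pages are charged
 * and the unused PAGE_SIZE - 200 = 3896 bytes are handed back to the
 * per-cpu stock with uncharging disabled, ready for the next object.
 */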
2958 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
2960 refill_obj_stock(objcg, size, true);
2963 static inline size_t obj_full_size(struct kmem_cache *s)
2966 * For each accounted object there is an extra space which is used
2967 * to store obj_cgroup membership. Charge it too.
2969 return s->size + sizeof(struct obj_cgroup *);
2972 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
2973 gfp_t flags, size_t size, void **p)
2975 struct obj_cgroup *objcg;
2976 struct slab *slab;
2977 unsigned long off;
2978 size_t i;
2981 * The obtained objcg pointer is safe to use within the current scope,
2982 * defined by the current task or a set_active_memcg() pair.
2983 * obj_cgroup_get() is used to get a permanent reference.
2985 objcg = current_obj_cgroup();
2986 if (!objcg)
2987 return true;
2990 * slab_alloc_node() avoids the NULL check, so we might be called with a
2991 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
2992 * the whole requested size.
2993 * Return success, as there's nothing to free back.
2995 if (unlikely(*p == NULL))
2996 return true;
2998 flags &= gfp_allowed_mask;
3000 if (lru) {
3001 int ret;
3002 struct mem_cgroup *memcg;
3004 memcg = get_mem_cgroup_from_objcg(objcg);
3005 ret = memcg_list_lru_alloc(memcg, lru, flags);
3006 css_put(&memcg->css);
3008 if (ret)
3009 return false;
3012 if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
3013 return false;
3015 for (i = 0; i < size; i++) {
3016 slab = virt_to_slab(p[i]);
3018 if (!slab_obj_exts(slab) &&
3019 alloc_slab_obj_exts(slab, s, flags, false)) {
3020 obj_cgroup_uncharge(objcg, obj_full_size(s));
3021 continue;
3024 off = obj_to_index(s, slab, p[i]);
3025 obj_cgroup_get(objcg);
3026 slab_obj_exts(slab)[off].objcg = objcg;
3027 mod_objcg_state(objcg, slab_pgdat(slab),
3028 cache_vmstat_idx(s), obj_full_size(s));
3031 return true;
3034 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
3035 void **p, int objects, struct slabobj_ext *obj_exts)
3037 for (int i = 0; i < objects; i++) {
3038 struct obj_cgroup *objcg;
3039 unsigned int off;
3041 off = obj_to_index(s, slab, p[i]);
3042 objcg = obj_exts[off].objcg;
3043 if (!objcg)
3044 continue;
3046 obj_exts[off].objcg = NULL;
3047 obj_cgroup_uncharge(objcg, obj_full_size(s));
3048 mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
3049 -obj_full_size(s));
3050 obj_cgroup_put(objcg);
3055 * Because folio_memcg(head) is not set on tails, set it now.
3057 void split_page_memcg(struct page *head, int old_order, int new_order)
3059 struct folio *folio = page_folio(head);
3060 struct mem_cgroup *memcg = folio_memcg(folio);
3061 int i;
3062 unsigned int old_nr = 1 << old_order;
3063 unsigned int new_nr = 1 << new_order;
3065 if (mem_cgroup_disabled() || !memcg)
3066 return;
3068 for (i = new_nr; i < old_nr; i += new_nr)
3069 folio_page(folio, i)->memcg_data = folio->memcg_data;
3071 if (folio_memcg_kmem(folio))
3072 obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
3073 else
3074 css_get_many(&memcg->css, old_nr / new_nr - 1);
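/*
 * Worked example (illustrative): splitting an order-9 folio into base
 * pages (old_order = 9, new_order = 0) copies memcg_data to 511 tail
 * pages and takes old_nr / new_nr - 1 = 511 extra objcg or css
 * references, one for each new independently-tracked page.
 */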
3077 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3079 unsigned long val;
3081 if (mem_cgroup_is_root(memcg)) {
3083 * Approximate root's usage from global state. This isn't
3084 * perfect, but the root usage was always an approximation.
3086 val = global_node_page_state(NR_FILE_PAGES) +
3087 global_node_page_state(NR_ANON_MAPPED);
3088 if (swap)
3089 val += total_swap_pages - get_nr_swap_pages();
3090 } else {
3091 if (!swap)
3092 val = page_counter_read(&memcg->memory);
3093 else
3094 val = page_counter_read(&memcg->memsw);
3096 return val;
3099 static int memcg_online_kmem(struct mem_cgroup *memcg)
3101 struct obj_cgroup *objcg;
3103 if (mem_cgroup_kmem_disabled())
3104 return 0;
3106 if (unlikely(mem_cgroup_is_root(memcg)))
3107 return 0;
3109 objcg = obj_cgroup_alloc();
3110 if (!objcg)
3111 return -ENOMEM;
3113 objcg->memcg = memcg;
3114 rcu_assign_pointer(memcg->objcg, objcg);
3115 obj_cgroup_get(objcg);
3116 memcg->orig_objcg = objcg;
3118 static_branch_enable(&memcg_kmem_online_key);
3120 memcg->kmemcg_id = memcg->id.id;
3122 return 0;
3125 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3127 struct mem_cgroup *parent;
3129 if (mem_cgroup_kmem_disabled())
3130 return;
3132 if (unlikely(mem_cgroup_is_root(memcg)))
3133 return;
3135 parent = parent_mem_cgroup(memcg);
3136 if (!parent)
3137 parent = root_mem_cgroup;
3139 memcg_reparent_objcgs(memcg, parent);
3142 * After we have finished memcg_reparent_objcgs(), all list_lrus
3143 * corresponding to this cgroup are guaranteed to remain empty.
3144 * The ordering is imposed by list_lru_node->lock taken by
3145 * memcg_reparent_list_lrus().
3147 memcg_reparent_list_lrus(memcg, parent);
3150 #ifdef CONFIG_CGROUP_WRITEBACK
3152 #include <trace/events/writeback.h>
3154 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3156 return wb_domain_init(&memcg->cgwb_domain, gfp);
3159 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3161 wb_domain_exit(&memcg->cgwb_domain);
3164 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3166 wb_domain_size_changed(&memcg->cgwb_domain);
3169 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3171 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3173 if (!memcg->css.parent)
3174 return NULL;
3176 return &memcg->cgwb_domain;
3180 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3181 * @wb: bdi_writeback in question
3182 * @pfilepages: out parameter for number of file pages
3183 * @pheadroom: out parameter for number of allocatable pages according to memcg
3184 * @pdirty: out parameter for number of dirty pages
3185 * @pwriteback: out parameter for number of pages under writeback
3187 * Determine the numbers of file, headroom, dirty, and writeback pages in
3188 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3189 * is a bit more involved.
3191 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3192 * headroom is calculated as the lowest headroom of itself and the
3193 * ancestors. Note that this doesn't consider the actual amount of
3194 * available memory in the system. The caller should further cap
3195 * *@pheadroom accordingly.
3197 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3198 unsigned long *pheadroom, unsigned long *pdirty,
3199 unsigned long *pwriteback)
3201 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3202 struct mem_cgroup *parent;
3204 mem_cgroup_flush_stats_ratelimited(memcg);
3206 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3207 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3208 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
3209 memcg_page_state(memcg, NR_ACTIVE_FILE);
3211 *pheadroom = PAGE_COUNTER_MAX;
3212 while ((parent = parent_mem_cgroup(memcg))) {
3213 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
3214 READ_ONCE(memcg->memory.high));
3215 unsigned long used = page_counter_read(&memcg->memory);
3217 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3218 memcg = parent;
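/*
 * Worked example (illustrative): a memcg with memory.high = 1G
 * (memory.max unset) and 800M used, under a parent with memory.max = 2G
 * and 1.9G used, gets min(1G - 800M, 2G - 1.9G) = 100M of headroom: the
 * parent is the binding constraint even though the child is further
 * from its own limit.
 */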
3223 * Foreign dirty flushing
3225 * There's an inherent mismatch between memcg and writeback. The former
3226 * tracks ownership per-page while the latter per-inode. This was a
3227 * deliberate design decision because honoring per-page ownership in the
3228 * writeback path is complicated, may lead to higher CPU and IO overheads
3229 * and is deemed unnecessary given that write-sharing an inode across
3230 * different cgroups isn't a common use-case.
3232 * Combined with inode majority-writer ownership switching, this works well
3233 * enough in most cases but there are some pathological cases. For
3234 * example, let's say there are two cgroups A and B which keep writing to
3235 * different but confined parts of the same inode. B owns the inode and
3236 * A's memory is limited far below B's. A's dirty ratio can rise enough to
3237 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
3238 * triggering background writeback. A will be slowed down without a way to
3239 * make writeback of the dirty pages happen.
3241 * Conditions like the above can lead to a cgroup getting repeatedly and
3242 * severely throttled after making some progress after each
3243 * dirty_expire_interval while the underlying IO device is almost
3244 * completely idle.
3246 * Solving this problem completely requires matching the ownership tracking
3247 * granularities between memcg and writeback in either direction. However,
3248 * the more egregious behaviors can be avoided by simply remembering the
3249 * most recent foreign dirtying events and initiating remote flushes on
3250 * them when local writeback isn't enough to keep the memory clean enough.
3252 * The following two functions implement such mechanism. When a foreign
3253 * page - a page whose memcg and writeback ownerships don't match - is
3254 * dirtied, mem_cgroup_track_foreign_dirty() records the inode-owning
3255 * bdi_writeback on the page-owning memcg. When balance_dirty_pages()
3256 * decides that the memcg needs to sleep due to high dirty ratio, it calls
3257 * mem_cgroup_flush_foreign() which queues writeback on the recorded
3258 * foreign bdi_writebacks which haven't expired. Both the numbers of
3259 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3260 * limited to MEMCG_CGWB_FRN_CNT.
3262 * The mechanism only remembers IDs and doesn't hold any object references.
3263 * As being wrong occasionally doesn't matter, updates and accesses to the
3264 * records are lockless and racy.
3266 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
3267 struct bdi_writeback *wb)
3269 struct mem_cgroup *memcg = folio_memcg(folio);
3270 struct memcg_cgwb_frn *frn;
3271 u64 now = get_jiffies_64();
3272 u64 oldest_at = now;
3273 int oldest = -1;
3274 int i;
3276 trace_track_foreign_dirty(folio, wb);
3279 * Pick the slot to use. If there is already a slot for @wb, keep
3280 * using it. If not, replace the oldest one which isn't being
3281 * written out.
3283 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3284 frn = &memcg->cgwb_frn[i];
3285 if (frn->bdi_id == wb->bdi->id &&
3286 frn->memcg_id == wb->memcg_css->id)
3287 break;
3288 if (time_before64(frn->at, oldest_at) &&
3289 atomic_read(&frn->done.cnt) == 1) {
3290 oldest = i;
3291 oldest_at = frn->at;
3295 if (i < MEMCG_CGWB_FRN_CNT) {
3297 * Re-using an existing one. Update timestamp lazily to
3298 * avoid making the cacheline hot. We want them to be
3299 * reasonably up-to-date and significantly shorter than
3300 * dirty_expire_interval as that's what expires the record.
3301 * Use the shorter of 1s and dirty_expire_interval / 8.
3303 unsigned long update_intv =
3304 min_t(unsigned long, HZ,
3305 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
3307 if (time_before64(frn->at, now - update_intv))
3308 frn->at = now;
3309 } else if (oldest >= 0) {
3310 /* replace the oldest free one */
3311 frn = &memcg->cgwb_frn[oldest];
3312 frn->bdi_id = wb->bdi->id;
3313 frn->memcg_id = wb->memcg_css->id;
3314 frn->at = now;
3318 /* issue foreign writeback flushes for recorded foreign dirtying events */
3319 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
3321 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3322 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
3323 u64 now = jiffies_64;
3324 int i;
3326 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3327 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
3330 * If the record is older than dirty_expire_interval,
3331 * writeback on it has already started. No need to kick it
3332 * off again. Also, don't start a new one if there's
3333 * already one in flight.
3335 if (time_after64(frn->at, now - intv) &&
3336 atomic_read(&frn->done.cnt) == 1) {
3337 frn->at = 0;
3338 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
3339 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
3340 WB_REASON_FOREIGN_FLUSH,
3341 &frn->done);
3346 #else /* CONFIG_CGROUP_WRITEBACK */
3348 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3350 return 0;
3353 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3357 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3361 #endif /* CONFIG_CGROUP_WRITEBACK */
3364 * Private memory cgroup IDR
3366 * Swap-out records and page cache shadow entries need to store memcg
3367 * references in constrained space, so we maintain an ID space that is
3368 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
3369 * memory-controlled cgroups to 64k.
3371 * However, there usually are many references to the offline CSS after
3372 * the cgroup has been destroyed, such as page cache or reclaimable
3373 * slab objects, that don't need to hang on to the ID. We want to keep
3374 * those dead CSS from occupying IDs, or we might quickly exhaust the
3375 * relatively small ID space and prevent the creation of new cgroups
3376 * even when there are much fewer than 64k cgroups - possibly none.
3378 * Maintain a private 16-bit ID space for memcg, and allow the ID to
3379 * be freed and recycled when it's no longer needed, which is usually
3380 * when the CSS is offlined.
3382 * The only exceptions to that are records of swapped-out tmpfs/shmem
3383 * pages that need to be attributed to live ancestors on swapin. But
3384 * those references are manageable from userspace.
3387 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
3388 static DEFINE_IDR(mem_cgroup_idr);
3390 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
3392 if (memcg->id.id > 0) {
3393 idr_remove(&mem_cgroup_idr, memcg->id.id);
3394 memcg->id.id = 0;
3398 void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
3399 unsigned int n)
3401 refcount_add(n, &memcg->id.ref);
3404 void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
3406 if (refcount_sub_and_test(n, &memcg->id.ref)) {
3407 mem_cgroup_id_remove(memcg);
3409 /* Memcg ID pins CSS */
3410 css_put(&memcg->css);
3414 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
3416 mem_cgroup_id_put_many(memcg, 1);
3420 * mem_cgroup_from_id - look up a memcg from a memcg id
3421 * @id: the memcg id to look up
3423 * Caller must hold rcu_read_lock().
3425 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
3427 WARN_ON_ONCE(!rcu_read_lock_held());
3428 return idr_find(&mem_cgroup_idr, id);
3431 #ifdef CONFIG_SHRINKER_DEBUG
3432 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
3434 struct cgroup *cgrp;
3435 struct cgroup_subsys_state *css;
3436 struct mem_cgroup *memcg;
3438 cgrp = cgroup_get_from_id(ino);
3439 if (IS_ERR(cgrp))
3440 return ERR_CAST(cgrp);
3442 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
3443 if (css)
3444 memcg = container_of(css, struct mem_cgroup, css);
3445 else
3446 memcg = ERR_PTR(-ENOENT);
3448 cgroup_put(cgrp);
3450 return memcg;
3452 #endif
3454 static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3456 struct mem_cgroup_per_node *pn;
3458 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
3459 if (!pn)
3460 return false;
3462 pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
3463 GFP_KERNEL_ACCOUNT, node);
3464 if (!pn->lruvec_stats)
3465 goto fail;
3467 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
3468 GFP_KERNEL_ACCOUNT);
3469 if (!pn->lruvec_stats_percpu)
3470 goto fail;
3472 lruvec_init(&pn->lruvec);
3473 pn->memcg = memcg;
3475 memcg->nodeinfo[node] = pn;
3476 return true;
3477 fail:
3478 kfree(pn->lruvec_stats);
3479 kfree(pn);
3480 return false;
3483 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3485 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3487 if (!pn)
3488 return;
3490 free_percpu(pn->lruvec_stats_percpu);
3491 kfree(pn->lruvec_stats);
3492 kfree(pn);
3495 static void __mem_cgroup_free(struct mem_cgroup *memcg)
3497 int node;
3499 obj_cgroup_put(memcg->orig_objcg);
3501 for_each_node(node)
3502 free_mem_cgroup_per_node_info(memcg, node);
3503 kfree(memcg->vmstats);
3504 free_percpu(memcg->vmstats_percpu);
3505 kfree(memcg);
3508 static void mem_cgroup_free(struct mem_cgroup *memcg)
3510 lru_gen_exit_memcg(memcg);
3511 memcg_wb_domain_exit(memcg);
3512 __mem_cgroup_free(memcg);
3515 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
3517 struct memcg_vmstats_percpu *statc, *pstatc;
3518 struct mem_cgroup *memcg;
3519 int node, cpu;
3520 int __maybe_unused i;
3521 long error = -ENOMEM;
3523 memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
3524 if (!memcg)
3525 return ERR_PTR(error);
3527 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
3528 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
3529 if (memcg->id.id < 0) {
3530 error = memcg->id.id;
3531 goto fail;
3534 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
3535 GFP_KERNEL_ACCOUNT);
3536 if (!memcg->vmstats)
3537 goto fail;
3539 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
3540 GFP_KERNEL_ACCOUNT);
3541 if (!memcg->vmstats_percpu)
3542 goto fail;
3544 for_each_possible_cpu(cpu) {
3545 if (parent)
3546 pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
3547 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3548 statc->parent = parent ? pstatc : NULL;
3549 statc->vmstats = memcg->vmstats;
3552 for_each_node(node)
3553 if (!alloc_mem_cgroup_per_node_info(memcg, node))
3554 goto fail;
3556 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
3557 goto fail;
3559 INIT_WORK(&memcg->high_work, high_work_func);
3560 vmpressure_init(&memcg->vmpressure);
3561 memcg->socket_pressure = jiffies;
3562 memcg1_memcg_init(memcg);
3563 memcg->kmemcg_id = -1;
3564 INIT_LIST_HEAD(&memcg->objcg_list);
3565 #ifdef CONFIG_CGROUP_WRITEBACK
3566 INIT_LIST_HEAD(&memcg->cgwb_list);
3567 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3568 memcg->cgwb_frn[i].done =
3569 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
3570 #endif
3571 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3572 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
3573 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
3574 memcg->deferred_split_queue.split_queue_len = 0;
3575 #endif
3576 lru_gen_init_memcg(memcg);
3577 return memcg;
3578 fail:
3579 mem_cgroup_id_remove(memcg);
3580 __mem_cgroup_free(memcg);
3581 return ERR_PTR(error);
3584 static struct cgroup_subsys_state * __ref
3585 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
3587 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
3588 struct mem_cgroup *memcg, *old_memcg;
3590 old_memcg = set_active_memcg(parent);
3591 memcg = mem_cgroup_alloc(parent);
3592 set_active_memcg(old_memcg);
3593 if (IS_ERR(memcg))
3594 return ERR_CAST(memcg);
3596 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3597 memcg1_soft_limit_reset(memcg);
3598 #ifdef CONFIG_ZSWAP
3599 memcg->zswap_max = PAGE_COUNTER_MAX;
3600 WRITE_ONCE(memcg->zswap_writeback,
3601 !parent || READ_ONCE(parent->zswap_writeback));
3602 #endif
3603 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3604 if (parent) {
3605 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
3607 page_counter_init(&memcg->memory, &parent->memory);
3608 page_counter_init(&memcg->swap, &parent->swap);
3609 #ifdef CONFIG_MEMCG_V1
3610 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
3611 page_counter_init(&memcg->kmem, &parent->kmem);
3612 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
3613 #endif
3614 } else {
3615 init_memcg_stats();
3616 init_memcg_events();
3617 page_counter_init(&memcg->memory, NULL);
3618 page_counter_init(&memcg->swap, NULL);
3619 #ifdef CONFIG_MEMCG_V1
3620 page_counter_init(&memcg->kmem, NULL);
3621 page_counter_init(&memcg->tcpmem, NULL);
3622 #endif
3623 root_mem_cgroup = memcg;
3624 return &memcg->css;
3627 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3628 static_branch_inc(&memcg_sockets_enabled_key);
3630 if (!cgroup_memory_nobpf)
3631 static_branch_inc(&memcg_bpf_enabled_key);
3633 return &memcg->css;
3636 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
3638 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3640 if (memcg_online_kmem(memcg))
3641 goto remove_id;
3644 * A memcg must be visible for expand_shrinker_info()
3645 * by the time the maps are allocated. So, we allocate maps
3646 * here, when for_each_mem_cgroup() can't skip it.
3648 if (alloc_shrinker_info(memcg))
3649 goto offline_kmem;
3651 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
3652 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
3653 FLUSH_TIME);
3654 lru_gen_online_memcg(memcg);
3656 /* Online state pins memcg ID, memcg ID pins CSS */
3657 refcount_set(&memcg->id.ref, 1);
3658 css_get(css);
3661 * Ensure mem_cgroup_from_id() works once we're fully online.
3663 * We could do this earlier and require callers to filter with
3664 * css_tryget_online(). But right now there are no users that
3665 * need earlier access, and the workingset code relies on the
3666 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
3667 * publish it here at the end of onlining. This matches the
3668 * regular ID destruction during offlining.
3670 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
3672 return 0;
3673 offline_kmem:
3674 memcg_offline_kmem(memcg);
3675 remove_id:
3676 mem_cgroup_id_remove(memcg);
3677 return -ENOMEM;
3680 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
3682 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3684 memcg1_css_offline(memcg);
3686 page_counter_set_min(&memcg->memory, 0);
3687 page_counter_set_low(&memcg->memory, 0);
3689 zswap_memcg_offline_cleanup(memcg);
3691 memcg_offline_kmem(memcg);
3692 reparent_shrinker_deferred(memcg);
3693 wb_memcg_offline(memcg);
3694 lru_gen_offline_memcg(memcg);
3696 drain_all_stock(memcg);
3698 mem_cgroup_id_put(memcg);
3701 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
3703 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3705 invalidate_reclaim_iterators(memcg);
3706 lru_gen_release_memcg(memcg);
3709 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
3711 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3712 int __maybe_unused i;
3714 #ifdef CONFIG_CGROUP_WRITEBACK
3715 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3716 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
3717 #endif
3718 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3719 static_branch_dec(&memcg_sockets_enabled_key);
3721 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
3722 static_branch_dec(&memcg_sockets_enabled_key);
3724 if (!cgroup_memory_nobpf)
3725 static_branch_dec(&memcg_bpf_enabled_key);
3727 vmpressure_cleanup(&memcg->vmpressure);
3728 cancel_work_sync(&memcg->high_work);
3729 memcg1_remove_from_trees(memcg);
3730 free_shrinker_info(memcg);
3731 mem_cgroup_free(memcg);
3735 * mem_cgroup_css_reset - reset the states of a mem_cgroup
3736 * @css: the target css
3738 * Reset the states of the mem_cgroup associated with @css. This is
3739 * invoked when the userland requests disabling on the default hierarchy
3740 * but the memcg is pinned through dependency. The memcg should stop
3741 * applying policies and should revert to the vanilla state as it may be
3742 * made visible again.
3744 * The current implementation only resets the essential configurations.
3745 * This needs to be expanded to cover all the visible parts.
3747 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
3749 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3751 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
3752 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
3753 #ifdef CONFIG_MEMCG_V1
3754 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
3755 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
3756 #endif
3757 page_counter_set_min(&memcg->memory, 0);
3758 page_counter_set_low(&memcg->memory, 0);
3759 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3760 memcg1_soft_limit_reset(memcg);
3761 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3762 memcg_wb_domain_size_changed(memcg);
3765 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
3767 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3768 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
3769 struct memcg_vmstats_percpu *statc;
3770 long delta, delta_cpu, v;
3771 int i, nid;
3773 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3775 for (i = 0; i < MEMCG_VMSTAT_SIZE; i++) {
3777 * Collect the aggregated propagation counts of groups
3778 * below us. We're in a per-cpu loop here and this is
3779 * a global counter, so the first cycle will get them.
3781 delta = memcg->vmstats->state_pending[i];
3782 if (delta)
3783 memcg->vmstats->state_pending[i] = 0;
3785 /* Add CPU changes on this level since the last flush */
3786 delta_cpu = 0;
3787 v = READ_ONCE(statc->state[i]);
3788 if (v != statc->state_prev[i]) {
3789 delta_cpu = v - statc->state_prev[i];
3790 delta += delta_cpu;
3791 statc->state_prev[i] = v;
3794 /* Aggregate counts on this level and propagate upwards */
3795 if (delta_cpu)
3796 memcg->vmstats->state_local[i] += delta_cpu;
3798 if (delta) {
3799 memcg->vmstats->state[i] += delta;
3800 if (parent)
3801 parent->vmstats->state_pending[i] += delta;
3805 for (i = 0; i < NR_MEMCG_EVENTS; i++) {
3806 delta = memcg->vmstats->events_pending[i];
3807 if (delta)
3808 memcg->vmstats->events_pending[i] = 0;
3810 delta_cpu = 0;
3811 v = READ_ONCE(statc->events[i]);
3812 if (v != statc->events_prev[i]) {
3813 delta_cpu = v - statc->events_prev[i];
3814 delta += delta_cpu;
3815 statc->events_prev[i] = v;
3818 if (delta_cpu)
3819 memcg->vmstats->events_local[i] += delta_cpu;
3821 if (delta) {
3822 memcg->vmstats->events[i] += delta;
3823 if (parent)
3824 parent->vmstats->events_pending[i] += delta;
3828 for_each_node_state(nid, N_MEMORY) {
3829 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
3830 struct lruvec_stats *lstats = pn->lruvec_stats;
3831 struct lruvec_stats *plstats = NULL;
3832 struct lruvec_stats_percpu *lstatc;
3834 if (parent)
3835 plstats = parent->nodeinfo[nid]->lruvec_stats;
3837 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
3839 for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; i++) {
3840 delta = lstats->state_pending[i];
3841 if (delta)
3842 lstats->state_pending[i] = 0;
3844 delta_cpu = 0;
3845 v = READ_ONCE(lstatc->state[i]);
3846 if (v != lstatc->state_prev[i]) {
3847 delta_cpu = v - lstatc->state_prev[i];
3848 delta += delta_cpu;
3849 lstatc->state_prev[i] = v;
3852 if (delta_cpu)
3853 lstats->state_local[i] += delta_cpu;
3855 if (delta) {
3856 lstats->state[i] += delta;
3857 if (plstats)
3858 plstats->state_pending[i] += delta;
3862 WRITE_ONCE(statc->stats_updates, 0);
3863 /* We are in a per-cpu loop here, only do the atomic write once */
3864 if (atomic64_read(&memcg->vmstats->stats_updates))
3865 atomic64_set(&memcg->vmstats->stats_updates, 0);
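/*
 * Worked example (illustrative): if this CPU's statc->state[i] reads 10
 * while state_prev[i] is 7, the flush adds delta_cpu = 3 to this
 * level's local and hierarchical counters and accumulates it in the
 * parent's state_pending[i], to be folded in when the parent itself is
 * flushed.
 */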
3868 static void mem_cgroup_fork(struct task_struct *task)
3871 * Set the update flag to cause task->objcg to be initialized lazily
3872 * on the first allocation. It can be done without any synchronization
3873 * because it's always performed on the current task, as is
3874 * current_objcg_update().
3876 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
3879 static void mem_cgroup_exit(struct task_struct *task)
3881 struct obj_cgroup *objcg = task->objcg;
3883 objcg = (struct obj_cgroup *)
3884 ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
3885 obj_cgroup_put(objcg);
3888 * Some kernel allocations can happen after this point,
3889 * but let's ignore them. It can be done without any synchronization
3890 * because it's always performed on the current task, as is
3891 * current_objcg_update().
3893 task->objcg = NULL;
3896 #ifdef CONFIG_LRU_GEN
3897 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
3899 struct task_struct *task;
3900 struct cgroup_subsys_state *css;
3902 /* find the first leader if there is any */
3903 cgroup_taskset_for_each_leader(task, css, tset)
3904 break;
3906 if (!task)
3907 return;
3909 task_lock(task);
3910 if (task->mm && READ_ONCE(task->mm->owner) == task)
3911 lru_gen_migrate_mm(task->mm);
3912 task_unlock(task);
3914 #else
3915 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
3916 #endif /* CONFIG_LRU_GEN */
3918 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
3920 struct task_struct *task;
3921 struct cgroup_subsys_state *css;
3923 cgroup_taskset_for_each(task, css, tset) {
3924 /* atomically set the update bit */
3925 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
3929 static void mem_cgroup_attach(struct cgroup_taskset *tset)
3931 mem_cgroup_lru_gen_attach(tset);
3932 mem_cgroup_kmem_attach(tset);
3935 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
3937 if (value == PAGE_COUNTER_MAX)
3938 seq_puts(m, "max\n");
3939 else
3940 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
3942 return 0;
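/*
 * Example output (illustrative, 4K pages): value = 262144 pages prints
 * "1073741824\n" (1G in bytes), while PAGE_COUNTER_MAX prints "max\n",
 * mirroring what userspace may write back via page_counter_memparse().
 */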
3945 static u64 memory_current_read(struct cgroup_subsys_state *css,
3946 struct cftype *cft)
3948 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3950 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
3953 static u64 memory_peak_read(struct cgroup_subsys_state *css,
3954 struct cftype *cft)
3956 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3958 return (u64)memcg->memory.watermark * PAGE_SIZE;
3961 static int memory_min_show(struct seq_file *m, void *v)
3963 return seq_puts_memcg_tunable(m,
3964 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
3967 static ssize_t memory_min_write(struct kernfs_open_file *of,
3968 char *buf, size_t nbytes, loff_t off)
3970 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3971 unsigned long min;
3972 int err;
3974 buf = strstrip(buf);
3975 err = page_counter_memparse(buf, "max", &min);
3976 if (err)
3977 return err;
3979 page_counter_set_min(&memcg->memory, min);
3981 return nbytes;
3984 static int memory_low_show(struct seq_file *m, void *v)
3986 return seq_puts_memcg_tunable(m,
3987 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
3990 static ssize_t memory_low_write(struct kernfs_open_file *of,
3991 char *buf, size_t nbytes, loff_t off)
3993 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3994 unsigned long low;
3995 int err;
3997 buf = strstrip(buf);
3998 err = page_counter_memparse(buf, "max", &low);
3999 if (err)
4000 return err;
4002 page_counter_set_low(&memcg->memory, low);
4004 return nbytes;
4007 static int memory_high_show(struct seq_file *m, void *v)
4009 return seq_puts_memcg_tunable(m,
4010 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
4013 static ssize_t memory_high_write(struct kernfs_open_file *of,
4014 char *buf, size_t nbytes, loff_t off)
4016 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4017 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4018 bool drained = false;
4019 unsigned long high;
4020 int err;
4022 buf = strstrip(buf);
4023 err = page_counter_memparse(buf, "max", &high);
4024 if (err)
4025 return err;
4027 page_counter_set_high(&memcg->memory, high);
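	/*
	 * The new high limit is published before the reclaim loop below, so
	 * that concurrent charges already observe it while usage is being
	 * pushed back under it.
	 */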
4029 for (;;) {
4030 unsigned long nr_pages = page_counter_read(&memcg->memory);
4031 unsigned long reclaimed;
4033 if (nr_pages <= high)
4034 break;
4036 if (signal_pending(current))
4037 break;
4039 if (!drained) {
4040 drain_all_stock(memcg);
4041 drained = true;
4042 continue;
4045 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
4046 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL);
4048 if (!reclaimed && !nr_retries--)
4049 break;
4052 memcg_wb_domain_size_changed(memcg);
4053 return nbytes;
4056 static int memory_max_show(struct seq_file *m, void *v)
4058 return seq_puts_memcg_tunable(m,
4059 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
4062 static ssize_t memory_max_write(struct kernfs_open_file *of,
4063 char *buf, size_t nbytes, loff_t off)
4065 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4066 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
4067 bool drained = false;
4068 unsigned long max;
4069 int err;
4071 buf = strstrip(buf);
4072 err = page_counter_memparse(buf, "max", &max);
4073 if (err)
4074 return err;
4076 xchg(&memcg->memory.max, max);
4078 for (;;) {
4079 unsigned long nr_pages = page_counter_read(&memcg->memory);
4081 if (nr_pages <= max)
4082 break;
4084 if (signal_pending(current))
4085 break;
4087 if (!drained) {
4088 drain_all_stock(memcg);
4089 drained = true;
4090 continue;
4093 if (nr_reclaims) {
4094 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
4095 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL))
4096 nr_reclaims--;
4097 continue;
4100 memcg_memory_event(memcg, MEMCG_OOM);
4101 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
4102 break;
4105 memcg_wb_domain_size_changed(memcg);
4106 return nbytes;
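/*
 * Illustrative userspace sketch (the cgroup path and limit string are
 * assumptions): writing to memory.max triggers the reclaim loop above, and
 * if reclaim cannot reach the new limit the kernel OOM-kills within the
 * group.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_memory_max(const char *path, const char *limit)
{
	int fd = open(path, O_WRONLY); /* e.g. "/sys/fs/cgroup/job/memory.max" */

	if (fd < 0)
		return -1;
	if (write(fd, limit, strlen(limit)) < 0) { /* e.g. "512M" or "max" */
		close(fd);
		return -1;
	}
	return close(fd);
}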
4110 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
4111 * sample program if any new events become available.
4113 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
4115 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
4116 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
4117 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
4118 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
4119 seq_printf(m, "oom_kill %lu\n",
4120 atomic_long_read(&events[MEMCG_OOM_KILL]));
4121 seq_printf(m, "oom_group_kill %lu\n",
4122 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
4125 static int memory_events_show(struct seq_file *m, void *v)
4127 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4129 __memory_events_show(m, memcg->memory_events);
4130 return 0;
4133 static int memory_events_local_show(struct seq_file *m, void *v)
4135 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4137 __memory_events_show(m, memcg->memory_events_local);
4138 return 0;
4141 int memory_stat_show(struct seq_file *m, void *v)
4143 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4144 char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
4145 struct seq_buf s;
4147 if (!buf)
4148 return -ENOMEM;
4149 seq_buf_init(&s, buf, PAGE_SIZE);
4150 memory_stat_format(memcg, &s);
4151 seq_puts(m, buf);
4152 kfree(buf);
4153 return 0;
4156 #ifdef CONFIG_NUMA
4157 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
4158 int item)
4160 return lruvec_page_state(lruvec, item) *
4161 memcg_page_state_output_unit(item);
4164 static int memory_numa_stat_show(struct seq_file *m, void *v)
4166 int i;
4167 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4169 mem_cgroup_flush_stats(memcg);
4171 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
4172 int nid;
4174 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
4175 continue;
4177 seq_printf(m, "%s", memory_stats[i].name);
4178 for_each_node_state(nid, N_MEMORY) {
4179 u64 size;
4180 struct lruvec *lruvec;
4182 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4183 size = lruvec_page_state_output(lruvec,
4184 memory_stats[i].idx);
4185 seq_printf(m, " N%d=%llu", nid, size);
4187 seq_putc(m, '\n');
4190 return 0;
4192 #endif
4194 static int memory_oom_group_show(struct seq_file *m, void *v)
4196 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4198 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
4200 return 0;
4203 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
4204 char *buf, size_t nbytes, loff_t off)
4206 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4207 int ret, oom_group;
4209 buf = strstrip(buf);
4210 if (!buf)
4211 return -EINVAL;
4213 ret = kstrtoint(buf, 0, &oom_group);
4214 if (ret)
4215 return ret;
4217 if (oom_group != 0 && oom_group != 1)
4218 return -EINVAL;
4220 WRITE_ONCE(memcg->oom_group, oom_group);
4222 return nbytes;
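/*
 * Usage note: with memory.oom.group set to 1, the OOM killer treats the
 * cgroup as an indivisible unit and kills all of its tasks together rather
 * than picking individual victims.
 */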
4225 enum {
4226 MEMORY_RECLAIM_SWAPPINESS = 0,
4227 MEMORY_RECLAIM_NULL,
4230 static const match_table_t tokens = {
4231 { MEMORY_RECLAIM_SWAPPINESS, "swappiness=%d"},
4232 { MEMORY_RECLAIM_NULL, NULL },
4235 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
4236 size_t nbytes, loff_t off)
4238 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4239 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4240 unsigned long nr_to_reclaim, nr_reclaimed = 0;
4241 int swappiness = -1;
4242 unsigned int reclaim_options;
4243 char *old_buf, *start;
4244 substring_t args[MAX_OPT_ARGS];
4246 buf = strstrip(buf);
4248 old_buf = buf;
4249 nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE;
4250 if (buf == old_buf)
4251 return -EINVAL;
4253 buf = strstrip(buf);
4255 while ((start = strsep(&buf, " ")) != NULL) {
4256 if (!strlen(start))
4257 continue;
4258 switch (match_token(start, tokens, args)) {
4259 case MEMORY_RECLAIM_SWAPPINESS:
4260 if (match_int(&args[0], &swappiness))
4261 return -EINVAL;
4262 if (swappiness < MIN_SWAPPINESS || swappiness > MAX_SWAPPINESS)
4263 return -EINVAL;
4264 break;
4265 default:
4266 return -EINVAL;
4270 reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
4271 while (nr_reclaimed < nr_to_reclaim) {
4272 /* Will converge on zero, but reclaim enforces a minimum */
4273 unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
4274 unsigned long reclaimed;
4276 if (signal_pending(current))
4277 return -EINTR;
4280 * This is the final attempt; drain per-CPU LRU caches in the
4281 * hope of introducing more evictable pages for
4282 * try_to_free_mem_cgroup_pages().
4284 if (!nr_retries)
4285 lru_add_drain_all();
4287 reclaimed = try_to_free_mem_cgroup_pages(memcg,
4288 batch_size, GFP_KERNEL,
4289 reclaim_options,
4290 swappiness == -1 ? NULL : &swappiness);
4292 if (!reclaimed && !nr_retries--)
4293 return -EAGAIN;
4295 nr_reclaimed += reclaimed;
4298 return nbytes;
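/*
 * Illustrative userspace sketch (path and amounts are assumptions):
 * proactive reclaim via memory.reclaim, matching the parser above. The
 * write can fail with EINTR on a pending signal, or EAGAIN when reclaim
 * makes no progress within the retry budget.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int request_reclaim(const char *path)
{
	const char *req = "64M swappiness=0"; /* reclaim 64MiB, avoid swap */
	int fd = open(path, O_WRONLY); /* e.g. ".../job/memory.reclaim" */

	if (fd < 0)
		return -1;
	if (write(fd, req, strlen(req)) < 0)
		perror("memory.reclaim");
	return close(fd);
}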
4301 static struct cftype memory_files[] = {
4303 .name = "current",
4304 .flags = CFTYPE_NOT_ON_ROOT,
4305 .read_u64 = memory_current_read,
4308 .name = "peak",
4309 .flags = CFTYPE_NOT_ON_ROOT,
4310 .read_u64 = memory_peak_read,
4313 .name = "min",
4314 .flags = CFTYPE_NOT_ON_ROOT,
4315 .seq_show = memory_min_show,
4316 .write = memory_min_write,
4319 .name = "low",
4320 .flags = CFTYPE_NOT_ON_ROOT,
4321 .seq_show = memory_low_show,
4322 .write = memory_low_write,
4325 .name = "high",
4326 .flags = CFTYPE_NOT_ON_ROOT,
4327 .seq_show = memory_high_show,
4328 .write = memory_high_write,
4331 .name = "max",
4332 .flags = CFTYPE_NOT_ON_ROOT,
4333 .seq_show = memory_max_show,
4334 .write = memory_max_write,
4337 .name = "events",
4338 .flags = CFTYPE_NOT_ON_ROOT,
4339 .file_offset = offsetof(struct mem_cgroup, events_file),
4340 .seq_show = memory_events_show,
4343 .name = "events.local",
4344 .flags = CFTYPE_NOT_ON_ROOT,
4345 .file_offset = offsetof(struct mem_cgroup, events_local_file),
4346 .seq_show = memory_events_local_show,
4349 .name = "stat",
4350 .seq_show = memory_stat_show,
4352 #ifdef CONFIG_NUMA
4354 .name = "numa_stat",
4355 .seq_show = memory_numa_stat_show,
4357 #endif
4359 .name = "oom.group",
4360 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
4361 .seq_show = memory_oom_group_show,
4362 .write = memory_oom_group_write,
4365 .name = "reclaim",
4366 .flags = CFTYPE_NS_DELEGATABLE,
4367 .write = memory_reclaim,
4369 { } /* terminate */
4372 struct cgroup_subsys memory_cgrp_subsys = {
4373 .css_alloc = mem_cgroup_css_alloc,
4374 .css_online = mem_cgroup_css_online,
4375 .css_offline = mem_cgroup_css_offline,
4376 .css_released = mem_cgroup_css_released,
4377 .css_free = mem_cgroup_css_free,
4378 .css_reset = mem_cgroup_css_reset,
4379 .css_rstat_flush = mem_cgroup_css_rstat_flush,
4380 .attach = mem_cgroup_attach,
4381 .fork = mem_cgroup_fork,
4382 .exit = mem_cgroup_exit,
4383 .dfl_cftypes = memory_files,
4384 #ifdef CONFIG_MEMCG_V1
4385 .can_attach = memcg1_can_attach,
4386 .cancel_attach = memcg1_cancel_attach,
4387 .post_attach = memcg1_move_task,
4388 .legacy_cftypes = mem_cgroup_legacy_files,
4389 #endif
4390 .early_init = 0,
4394 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4395 * @root: the top ancestor of the sub-tree being checked
4396 * @memcg: the memory cgroup to check
4398 * WARNING: This function is not stateless! It can only be used as part
4399 * of a top-down tree iteration, not for isolated queries.
4401 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
4402 struct mem_cgroup *memcg)
4404 bool recursive_protection =
4405 cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT;
4407 if (mem_cgroup_disabled())
4408 return;
4410 if (!root)
4411 root = root_mem_cgroup;
4413 page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection);
4416 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
4417 gfp_t gfp)
4419 int ret;
4421 ret = try_charge(memcg, gfp, folio_nr_pages(folio));
4422 if (ret)
4423 goto out;
4425 mem_cgroup_commit_charge(folio, memcg);
4426 out:
4427 return ret;
4430 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
4432 struct mem_cgroup *memcg;
4433 int ret;
4435 memcg = get_mem_cgroup_from_mm(mm);
4436 ret = charge_memcg(folio, memcg, gfp);
4437 css_put(&memcg->css);
4439 return ret;
4443 * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
4444 * @memcg: memcg to charge.
4445 * @gfp: reclaim mode.
4446 * @nr_pages: number of pages to charge.
4448 * This function is called when allocating a huge page folio to determine if
4449 * the memcg has the capacity for it. It does not commit the charge yet,
4450 * as the hugetlb folio itself has not been obtained from the hugetlb pool.
4452 * Once we have obtained the hugetlb folio, we can call
4453 * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the
4454 * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect
4455 * of try_charge().
4457 * Returns 0 on success. Otherwise, an error code is returned.
4459 int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
4460 long nr_pages)
4463 * If hugetlb memcg charging is not enabled, do not fail hugetlb allocation,
4464 * but do not attempt to commit charge later (or cancel on error) either.
4466 if (mem_cgroup_disabled() || !memcg ||
4467 !cgroup_subsys_on_dfl(memory_cgrp_subsys) ||
4468 !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
4469 return -EOPNOTSUPP;
4471 if (try_charge(memcg, gfp, nr_pages))
4472 return -ENOMEM;
4474 return 0;
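/*
 * Illustrative caller pattern for the try/commit/cancel protocol documented
 * above (a sketch; alloc_pool_folio() is a hypothetical stand-in for the
 * hugetlb pool lookup):
 */
static struct folio *hugetlb_charge_sketch(struct mem_cgroup *memcg,
					   gfp_t gfp, long nr_pages)
{
	int ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
	/* -EOPNOTSUPP means accounting is disabled: skip commit/cancel */
	bool charged = !ret;
	struct folio *folio;

	if (ret == -ENOMEM)
		return NULL;

	folio = alloc_pool_folio(nr_pages); /* hypothetical */
	if (!folio) {
		if (charged)
			mem_cgroup_cancel_charge(memcg, nr_pages);
		return NULL;
	}
	if (charged)
		mem_cgroup_commit_charge(folio, memcg);
	return folio;
}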
4478 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
4479 * @folio: folio to charge.
4480 * @mm: mm context of the victim
4481 * @gfp: reclaim mode
4482 * @entry: swap entry for which the folio is allocated
4484 * This function charges a folio allocated for swapin. Please call this before
4485 * adding the folio to the swapcache.
4487 * Returns 0 on success. Otherwise, an error code is returned.
4489 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
4490 gfp_t gfp, swp_entry_t entry)
4492 struct mem_cgroup *memcg;
4493 unsigned short id;
4494 int ret;
4496 if (mem_cgroup_disabled())
4497 return 0;
4499 id = lookup_swap_cgroup_id(entry);
4500 rcu_read_lock();
4501 memcg = mem_cgroup_from_id(id);
4502 if (!memcg || !css_tryget_online(&memcg->css))
4503 memcg = get_mem_cgroup_from_mm(mm);
4504 rcu_read_unlock();
4506 ret = charge_memcg(folio, memcg, gfp);
4508 css_put(&memcg->css);
4509 return ret;
4513 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
4514 * @entry: swap entry for which the page is charged
4516 * Call this function after successfully adding the charged page to swapcache.
4518 * Note: This function assumes the page whose swap slot is being
4519 * uncharged is an order-0 page.
4521 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
4524 * Cgroup1's unified memory+swap counter has been charged with the
4525 * new swapcache page, finish the transfer by uncharging the swap
4526 * slot. The swap slot would also get uncharged when it dies, but
4527 * it can stick around indefinitely and we'd count the page twice
4528 * the entire time.
4530 * Cgroup2 has separate resource counters for memory and swap,
4531 * so this is a non-issue here. Memory and swap charge lifetimes
4532 * correspond 1:1 to page and swap slot lifetimes: we charge the
4533 * page to memory here, and uncharge swap when the slot is freed.
4535 if (!mem_cgroup_disabled() && do_memsw_account()) {
4537 * The swap entry might not get freed for a long time,
4538 * let's not wait for it. The page already received a
4539 * memory+swap charge, drop the swap entry duplicate.
4541 mem_cgroup_uncharge_swap(entry, 1);
4545 struct uncharge_gather {
4546 struct mem_cgroup *memcg;
4547 unsigned long nr_memory;
4548 unsigned long pgpgout;
4549 unsigned long nr_kmem;
4550 int nid;
4553 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
4555 memset(ug, 0, sizeof(*ug));
4558 static void uncharge_batch(const struct uncharge_gather *ug)
4560 unsigned long flags;
4562 if (ug->nr_memory) {
4563 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
4564 if (do_memsw_account())
4565 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
4566 if (ug->nr_kmem) {
4567 mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem);
4568 memcg1_account_kmem(ug->memcg, -ug->nr_kmem);
4570 memcg1_oom_recover(ug->memcg);
4573 local_irq_save(flags);
4574 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
4575 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
4576 memcg1_check_events(ug->memcg, ug->nid);
4577 local_irq_restore(flags);
4579 /* drop reference from uncharge_folio */
4580 css_put(&ug->memcg->css);
4583 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
4585 long nr_pages;
4586 struct mem_cgroup *memcg;
4587 struct obj_cgroup *objcg;
4589 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4590 VM_BUG_ON_FOLIO(folio_order(folio) > 1 &&
4591 !folio_test_hugetlb(folio) &&
4592 !list_empty(&folio->_deferred_list), folio);
4595 * Nobody should be changing or seriously looking at
4596 * folio memcg or objcg at this point; we have fully
4597 * exclusive access to the folio.
4599 if (folio_memcg_kmem(folio)) {
4600 objcg = __folio_objcg(folio);
4602 * This get matches the put at the end of the function and
4603 * kmem pages do not hold memcg references anymore.
4605 memcg = get_mem_cgroup_from_objcg(objcg);
4606 } else {
4607 memcg = __folio_memcg(folio);
4610 if (!memcg)
4611 return;
4613 if (ug->memcg != memcg) {
4614 if (ug->memcg) {
4615 uncharge_batch(ug);
4616 uncharge_gather_clear(ug);
4618 ug->memcg = memcg;
4619 ug->nid = folio_nid(folio);
4621 /* pairs with css_put in uncharge_batch */
4622 css_get(&memcg->css);
4625 nr_pages = folio_nr_pages(folio);
4627 if (folio_memcg_kmem(folio)) {
4628 ug->nr_memory += nr_pages;
4629 ug->nr_kmem += nr_pages;
4631 folio->memcg_data = 0;
4632 obj_cgroup_put(objcg);
4633 } else {
4634 /* LRU pages aren't accounted at the root level */
4635 if (!mem_cgroup_is_root(memcg))
4636 ug->nr_memory += nr_pages;
4637 ug->pgpgout++;
4639 folio->memcg_data = 0;
4642 css_put(&memcg->css);
4645 void __mem_cgroup_uncharge(struct folio *folio)
4647 struct uncharge_gather ug;
4649 /* Don't touch folio->lru of any random page, pre-check: */
4650 if (!folio_memcg(folio))
4651 return;
4653 uncharge_gather_clear(&ug);
4654 uncharge_folio(folio, &ug);
4655 uncharge_batch(&ug);
4658 void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
4660 struct uncharge_gather ug;
4661 unsigned int i;
4663 uncharge_gather_clear(&ug);
4664 for (i = 0; i < folios->nr; i++)
4665 uncharge_folio(folios->folios[i], &ug);
4666 if (ug.memcg)
4667 uncharge_batch(&ug);
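/*
 * Illustrative sketch: coalescing uncharges with a folio_batch so that
 * counter updates are batched per memcg by uncharge_batch() above. Assumes
 * the folios are fully unused (no longer mapped or on the LRU).
 */
static void uncharge_many_sketch(struct folio **folios, unsigned int nr)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	for (i = 0; i < nr; i++) {
		/* folio_batch_add() returns the slots left; 0 means full */
		if (!folio_batch_add(&fbatch, folios[i])) {
			mem_cgroup_uncharge_folios(&fbatch);
			folio_batch_reinit(&fbatch);
		}
	}
	if (folio_batch_count(&fbatch))
		mem_cgroup_uncharge_folios(&fbatch);
}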
4671 * mem_cgroup_replace_folio - Charge a folio's replacement.
4672 * @old: Currently circulating folio.
4673 * @new: Replacement folio.
4675 * Charge @new as a replacement folio for @old. @old will
4676 * be uncharged upon free.
4678 * Both folios must be locked, @new->mapping must be set up.
4680 void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
4682 struct mem_cgroup *memcg;
4683 long nr_pages = folio_nr_pages(new);
4684 unsigned long flags;
4686 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4687 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4688 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4689 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
4691 if (mem_cgroup_disabled())
4692 return;
4694 /* Page cache replacement: new folio already charged? */
4695 if (folio_memcg(new))
4696 return;
4698 memcg = folio_memcg(old);
4699 VM_WARN_ON_ONCE_FOLIO(!memcg, old);
4700 if (!memcg)
4701 return;
4703 /* Force-charge the new page. The old one will be freed soon */
4704 if (!mem_cgroup_is_root(memcg)) {
4705 page_counter_charge(&memcg->memory, nr_pages);
4706 if (do_memsw_account())
4707 page_counter_charge(&memcg->memsw, nr_pages);
4710 css_get(&memcg->css);
4711 commit_charge(new, memcg);
4713 local_irq_save(flags);
4714 mem_cgroup_charge_statistics(memcg, nr_pages);
4715 memcg1_check_events(memcg, folio_nid(new));
4716 local_irq_restore(flags);
4720 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
4721 * @old: Currently circulating folio.
4722 * @new: Replacement folio.
4724 * Transfer the memcg data from the old folio to the new folio for migration.
4725 * The old folio's data info will be cleared. Note that the memory counters
4726 * will remain unchanged throughout the process.
4728 * Both folios must be locked, @new->mapping must be set up.
4730 void mem_cgroup_migrate(struct folio *old, struct folio *new)
4732 struct mem_cgroup *memcg;
4734 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4735 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4736 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4737 VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
4738 VM_BUG_ON_FOLIO(folio_test_lru(old), old);
4740 if (mem_cgroup_disabled())
4741 return;
4743 memcg = folio_memcg(old);
4745 * Note that it is normal to see !memcg for a hugetlb folio.
4746 * For example, it could have been allocated when memory_hugetlb_accounting
4747 * was not selected.
4749 VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
4750 if (!memcg)
4751 return;
4753 /* Transfer the charge and the css ref */
4754 commit_charge(new, memcg);
4755 old->memcg_data = 0;
4758 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
4759 EXPORT_SYMBOL(memcg_sockets_enabled_key);
4761 void mem_cgroup_sk_alloc(struct sock *sk)
4763 struct mem_cgroup *memcg;
4765 if (!mem_cgroup_sockets_enabled)
4766 return;
4768 /* Do not associate the sock with an unrelated interrupted task's memcg. */
4769 if (!in_task())
4770 return;
4772 rcu_read_lock();
4773 memcg = mem_cgroup_from_task(current);
4774 if (mem_cgroup_is_root(memcg))
4775 goto out;
4776 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg))
4777 goto out;
4778 if (css_tryget(&memcg->css))
4779 sk->sk_memcg = memcg;
4780 out:
4781 rcu_read_unlock();
4784 void mem_cgroup_sk_free(struct sock *sk)
4786 if (sk->sk_memcg)
4787 css_put(&sk->sk_memcg->css);
4791 * mem_cgroup_charge_skmem - charge socket memory
4792 * @memcg: memcg to charge
4793 * @nr_pages: number of pages to charge
4794 * @gfp_mask: reclaim mode
4796 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
4797 * @memcg's configured limit, %false if it doesn't.
4799 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
4800 gfp_t gfp_mask)
4802 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
4803 return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
4805 if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
4806 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
4807 return true;
4810 return false;
4814 * mem_cgroup_uncharge_skmem - uncharge socket memory
4815 * @memcg: memcg to uncharge
4816 * @nr_pages: number of pages to uncharge
4818 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
4820 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
4821 memcg1_uncharge_skmem(memcg, nr_pages);
4822 return;
4825 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
4827 refill_stock(memcg, nr_pages);
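/*
 * Illustrative pairing (a sketch, not the actual networking callers): each
 * successful mem_cgroup_charge_skmem() must eventually be matched by a
 * mem_cgroup_uncharge_skmem() for the same number of pages.
 */
static bool skmem_account_sketch(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (!mem_cgroup_charge_skmem(memcg, nr_pages, GFP_NOWAIT))
		return false; /* over limit: caller backs off */
	/* ... buffer in use ... */
	mem_cgroup_uncharge_skmem(memcg, nr_pages);
	return true;
}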
4830 static int __init cgroup_memory(char *s)
4832 char *token;
4834 while ((token = strsep(&s, ",")) != NULL) {
4835 if (!*token)
4836 continue;
4837 if (!strcmp(token, "nosocket"))
4838 cgroup_memory_nosocket = true;
4839 if (!strcmp(token, "nokmem"))
4840 cgroup_memory_nokmem = true;
4841 if (!strcmp(token, "nobpf"))
4842 cgroup_memory_nobpf = true;
4844 return 1;
4846 __setup("cgroup.memory=", cgroup_memory);
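/*
 * Example boot command line: "cgroup.memory=nokmem,nosocket" disables
 * kernel-memory and socket-memory accounting while leaving BPF accounting
 * enabled.
 */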
4849 * subsys_initcall() for memory controller.
4851 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
4852 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
4853 * basically everything that doesn't depend on a specific mem_cgroup structure
4854 * should be initialized from here.
4856 static int __init mem_cgroup_init(void)
4858 int cpu;
4861 * Currently s32 type (can refer to struct batched_lruvec_stat) is
4862 * used for per-memcg-per-cpu caching of per-node statistics. In order
4863 * to work fine, we should make sure that the overfill threshold can't
4864 * exceed S32_MAX / PAGE_SIZE.
4866 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
4868 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
4869 memcg_hotplug_cpu_dead);
4871 for_each_possible_cpu(cpu)
4872 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
4873 drain_local_stock);
4875 return 0;
4877 subsys_initcall(mem_cgroup_init);
4879 #ifdef CONFIG_SWAP
4880 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
4882 while (!refcount_inc_not_zero(&memcg->id.ref)) {
4884 * The root cgroup cannot be destroyed, so its refcount must
4885 * always be >= 1.
4887 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
4888 VM_BUG_ON(1);
4889 break;
4891 memcg = parent_mem_cgroup(memcg);
4892 if (!memcg)
4893 memcg = root_mem_cgroup;
4895 return memcg;
4899 * mem_cgroup_swapout - transfer a memsw charge to swap
4900 * @folio: folio whose memsw charge to transfer
4901 * @entry: swap entry to move the charge to
4903 * Transfer the memsw charge of @folio to @entry.
4905 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
4907 struct mem_cgroup *memcg, *swap_memcg;
4908 unsigned int nr_entries;
4909 unsigned short oldid;
4911 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4912 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
4914 if (mem_cgroup_disabled())
4915 return;
4917 if (!do_memsw_account())
4918 return;
4920 memcg = folio_memcg(folio);
4922 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
4923 if (!memcg)
4924 return;
4927 * In case the memcg owning these pages has been offlined and doesn't
4928 * have an ID allocated to it anymore, charge the closest online
4929 * ancestor for the swap instead and transfer the memory+swap charge.
4931 swap_memcg = mem_cgroup_id_get_online(memcg);
4932 nr_entries = folio_nr_pages(folio);
4933 /* Get references for the tail pages, too */
4934 if (nr_entries > 1)
4935 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
4936 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
4937 nr_entries);
4938 VM_BUG_ON_FOLIO(oldid, folio);
4939 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
4941 folio->memcg_data = 0;
4943 if (!mem_cgroup_is_root(memcg))
4944 page_counter_uncharge(&memcg->memory, nr_entries);
4946 if (memcg != swap_memcg) {
4947 if (!mem_cgroup_is_root(swap_memcg))
4948 page_counter_charge(&swap_memcg->memsw, nr_entries);
4949 page_counter_uncharge(&memcg->memsw, nr_entries);
4953 * Interrupts should be disabled here because the caller holds the
4954 * i_pages lock, which is taken with interrupts off. Keeping
4955 * interrupts disabled matters because it is the only
4956 * synchronisation we have for updating the per-CPU variables.
4958 memcg_stats_lock();
4959 mem_cgroup_charge_statistics(memcg, -nr_entries);
4960 memcg_stats_unlock();
4961 memcg1_check_events(memcg, folio_nid(folio));
4963 css_put(&memcg->css);
4967 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
4968 * @folio: folio being added to swap
4969 * @entry: swap entry to charge
4971 * Try to charge @folio's memcg for the swap space at @entry.
4973 * Returns 0 on success, -ENOMEM on failure.
4975 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
4977 unsigned int nr_pages = folio_nr_pages(folio);
4978 struct page_counter *counter;
4979 struct mem_cgroup *memcg;
4980 unsigned short oldid;
4982 if (do_memsw_account())
4983 return 0;
4985 memcg = folio_memcg(folio);
4987 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
4988 if (!memcg)
4989 return 0;
4991 if (!entry.val) {
4992 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
4993 return 0;
4996 memcg = mem_cgroup_id_get_online(memcg);
4998 if (!mem_cgroup_is_root(memcg) &&
4999 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
5000 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
5001 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5002 mem_cgroup_id_put(memcg);
5003 return -ENOMEM;
5006 /* Get references for the tail pages, too */
5007 if (nr_pages > 1)
5008 mem_cgroup_id_get_many(memcg, nr_pages - 1);
5009 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
5010 VM_BUG_ON_FOLIO(oldid, folio);
5011 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
5013 return 0;
5017 * __mem_cgroup_uncharge_swap - uncharge swap space
5018 * @entry: swap entry to uncharge
5019 * @nr_pages: the amount of swap space to uncharge
5021 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
5023 struct mem_cgroup *memcg;
5024 unsigned short id;
5026 id = swap_cgroup_record(entry, 0, nr_pages);
5027 rcu_read_lock();
5028 memcg = mem_cgroup_from_id(id);
5029 if (memcg) {
5030 if (!mem_cgroup_is_root(memcg)) {
5031 if (do_memsw_account())
5032 page_counter_uncharge(&memcg->memsw, nr_pages);
5033 else
5034 page_counter_uncharge(&memcg->swap, nr_pages);
5036 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
5037 mem_cgroup_id_put_many(memcg, nr_pages);
5039 rcu_read_unlock();
5042 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5044 long nr_swap_pages = get_nr_swap_pages();
5046 if (mem_cgroup_disabled() || do_memsw_account())
5047 return nr_swap_pages;
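	/* Walk up the hierarchy and clamp to the tightest remaining swap allowance. */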
5048 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
5049 nr_swap_pages = min_t(long, nr_swap_pages,
5050 READ_ONCE(memcg->swap.max) -
5051 page_counter_read(&memcg->swap));
5052 return nr_swap_pages;
5055 bool mem_cgroup_swap_full(struct folio *folio)
5057 struct mem_cgroup *memcg;
5059 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5061 if (vm_swap_full())
5062 return true;
5063 if (do_memsw_account())
5064 return false;
5066 memcg = folio_memcg(folio);
5067 if (!memcg)
5068 return false;
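	/*
	 * Mirror vm_swap_full(): a cgroup's swap is considered "full" once
	 * usage exceeds half of swap.high or swap.max anywhere up the
	 * hierarchy.
	 */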
5070 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
5071 unsigned long usage = page_counter_read(&memcg->swap);
5073 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
5074 usage * 2 >= READ_ONCE(memcg->swap.max))
5075 return true;
5078 return false;
5081 static int __init setup_swap_account(char *s)
5083 bool res;
5085 if (!kstrtobool(s, &res) && !res)
5086 pr_warn_once("The swapaccount=0 commandline option is deprecated "
5087 "in favor of configuring swap control via cgroupfs. "
5088 "Please report your usecase to linux-mm@kvack.org if you "
5089 "depend on this functionality.\n");
5090 return 1;
5092 __setup("swapaccount=", setup_swap_account);
5094 static u64 swap_current_read(struct cgroup_subsys_state *css,
5095 struct cftype *cft)
5097 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5099 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5102 static u64 swap_peak_read(struct cgroup_subsys_state *css,
5103 struct cftype *cft)
5105 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5107 return (u64)memcg->swap.watermark * PAGE_SIZE;
5110 static int swap_high_show(struct seq_file *m, void *v)
5112 return seq_puts_memcg_tunable(m,
5113 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
5116 static ssize_t swap_high_write(struct kernfs_open_file *of,
5117 char *buf, size_t nbytes, loff_t off)
5119 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5120 unsigned long high;
5121 int err;
5123 buf = strstrip(buf);
5124 err = page_counter_memparse(buf, "max", &high);
5125 if (err)
5126 return err;
5128 page_counter_set_high(&memcg->swap, high);
5130 return nbytes;
5133 static int swap_max_show(struct seq_file *m, void *v)
5135 return seq_puts_memcg_tunable(m,
5136 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
5139 static ssize_t swap_max_write(struct kernfs_open_file *of,
5140 char *buf, size_t nbytes, loff_t off)
5142 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5143 unsigned long max;
5144 int err;
5146 buf = strstrip(buf);
5147 err = page_counter_memparse(buf, "max", &max);
5148 if (err)
5149 return err;
5151 xchg(&memcg->swap.max, max);
5153 return nbytes;
5156 static int swap_events_show(struct seq_file *m, void *v)
5158 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5160 seq_printf(m, "high %lu\n",
5161 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
5162 seq_printf(m, "max %lu\n",
5163 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
5164 seq_printf(m, "fail %lu\n",
5165 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
5167 return 0;
5170 static struct cftype swap_files[] = {
5172 .name = "swap.current",
5173 .flags = CFTYPE_NOT_ON_ROOT,
5174 .read_u64 = swap_current_read,
5177 .name = "swap.high",
5178 .flags = CFTYPE_NOT_ON_ROOT,
5179 .seq_show = swap_high_show,
5180 .write = swap_high_write,
5183 .name = "swap.max",
5184 .flags = CFTYPE_NOT_ON_ROOT,
5185 .seq_show = swap_max_show,
5186 .write = swap_max_write,
5189 .name = "swap.peak",
5190 .flags = CFTYPE_NOT_ON_ROOT,
5191 .read_u64 = swap_peak_read,
5194 .name = "swap.events",
5195 .flags = CFTYPE_NOT_ON_ROOT,
5196 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
5197 .seq_show = swap_events_show,
5199 { } /* terminate */
5202 #ifdef CONFIG_ZSWAP
5204 * obj_cgroup_may_zswap - check if this cgroup can zswap
5205 * @objcg: the object cgroup
5207 * Check if the hierarchical zswap limit has been reached.
5209 * This doesn't check for specific headroom, and it is not atomic
5210 * either. But with zswap, the size of the allocation is only known
5211 * once compression has occurred, and this optimistic pre-check avoids
5212 * spending cycles on compression when there is already no room left
5213 * or zswap is disabled altogether somewhere in the hierarchy.
5215 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
5217 struct mem_cgroup *memcg, *original_memcg;
5218 bool ret = true;
5220 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5221 return true;
5223 original_memcg = get_mem_cgroup_from_objcg(objcg);
5224 for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
5225 memcg = parent_mem_cgroup(memcg)) {
5226 unsigned long max = READ_ONCE(memcg->zswap_max);
5227 unsigned long pages;
5229 if (max == PAGE_COUNTER_MAX)
5230 continue;
5231 if (max == 0) {
5232 ret = false;
5233 break;
5237 * mem_cgroup_flush_stats() ignores small changes. Use
5238 * do_flush_stats() directly to get accurate stats for charging.
5240 do_flush_stats(memcg);
5241 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
5242 if (pages < max)
5243 continue;
5244 ret = false;
5245 break;
5247 mem_cgroup_put(original_memcg);
5248 return ret;
5252 * obj_cgroup_charge_zswap - charge compression backend memory
5253 * @objcg: the object cgroup
5254 * @size: size of compressed object
5256 * This forces the charge after obj_cgroup_may_zswap() allowed
5257 * compression and storage in zswap for this cgroup to go ahead.
5259 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
5261 struct mem_cgroup *memcg;
5263 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5264 return;
5266 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
5268 /* PF_MEMALLOC context, charging must succeed */
5269 if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
5270 VM_WARN_ON_ONCE(1);
5272 rcu_read_lock();
5273 memcg = obj_cgroup_memcg(objcg);
5274 mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
5275 mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
5276 rcu_read_unlock();
5280 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
5281 * @objcg: the object cgroup
5282 * @size: size of compressed object
5284 * Uncharges zswap memory on page-in.
5286 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
5288 struct mem_cgroup *memcg;
5290 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5291 return;
5293 obj_cgroup_uncharge(objcg, size);
5295 rcu_read_lock();
5296 memcg = obj_cgroup_memcg(objcg);
5297 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
5298 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
5299 rcu_read_unlock();
5302 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
5304 /* if zswap is disabled, do not block pages going to the swapping device */
5305 return !zswap_is_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback);
5308 static u64 zswap_current_read(struct cgroup_subsys_state *css,
5309 struct cftype *cft)
5311 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5313 mem_cgroup_flush_stats(memcg);
5314 return memcg_page_state(memcg, MEMCG_ZSWAP_B);
5317 static int zswap_max_show(struct seq_file *m, void *v)
5319 return seq_puts_memcg_tunable(m,
5320 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
5323 static ssize_t zswap_max_write(struct kernfs_open_file *of,
5324 char *buf, size_t nbytes, loff_t off)
5326 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5327 unsigned long max;
5328 int err;
5330 buf = strstrip(buf);
5331 err = page_counter_memparse(buf, "max", &max);
5332 if (err)
5333 return err;
5335 xchg(&memcg->zswap_max, max);
5337 return nbytes;
5340 static int zswap_writeback_show(struct seq_file *m, void *v)
5342 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5344 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
5345 return 0;
5348 static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
5349 char *buf, size_t nbytes, loff_t off)
5351 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5352 int zswap_writeback;
5353 ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
5355 if (parse_ret)
5356 return parse_ret;
5358 if (zswap_writeback != 0 && zswap_writeback != 1)
5359 return -EINVAL;
5361 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
5362 return nbytes;
5365 static struct cftype zswap_files[] = {
5367 .name = "zswap.current",
5368 .flags = CFTYPE_NOT_ON_ROOT,
5369 .read_u64 = zswap_current_read,
5372 .name = "zswap.max",
5373 .flags = CFTYPE_NOT_ON_ROOT,
5374 .seq_show = zswap_max_show,
5375 .write = zswap_max_write,
5378 .name = "zswap.writeback",
5379 .seq_show = zswap_writeback_show,
5380 .write = zswap_writeback_write,
5382 { } /* terminate */
5384 #endif /* CONFIG_ZSWAP */
5386 static int __init mem_cgroup_swap_init(void)
5388 if (mem_cgroup_disabled())
5389 return 0;
5391 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
5392 #ifdef CONFIG_MEMCG_V1
5393 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
5394 #endif
5395 #ifdef CONFIG_ZSWAP
5396 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
5397 #endif
5398 return 0;
5400 subsys_initcall(mem_cgroup_swap_init);
5402 #endif /* CONFIG_SWAP */