/*
 * Copyright (C) 2004, Red Hat, Inc.
 * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
 * Released under the GPL, see the file COPYING for details.
 *
 * Simple token based thrashing protection, using the algorithm
 * described in: http://www.cse.ohio-state.edu/hpcs/WWW/HTML/publications/abs05-1.html
 *
 * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
 * Improved algorithm to pass token:
 * Each task has a priority which is incremented if it contended
 * for the token in an interval less than its previous attempt.
 * If the token is acquired, that task's priority is boosted to prevent
 * the token from bouncing around too often and to let the task make
 * some progress in its execution.
 */
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/memcontrol.h>

#include <trace/events/vmscan.h>
/* Age the token holder's priority once per this many global faults. */
#define TOKEN_AGING_INTERVAL	(0xFF)

/* Protects swap_token_mm / swap_token_memcg. */
static DEFINE_SPINLOCK(swap_token_lock);
/* The mm currently holding the swap token (NULL if none). */
struct mm_struct *swap_token_mm;
/* memcg owning swap_token_mm at grab time; identity cookie for match_memcg(). */
static struct mem_cgroup *swap_token_memcg;
34 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
35 static struct mem_cgroup
*swap_token_memcg_from_mm(struct mm_struct
*mm
)
37 struct mem_cgroup
*memcg
;
39 memcg
= try_get_mem_cgroup_from_mm(mm
);
41 css_put(mem_cgroup_css(memcg
));
46 static struct mem_cgroup
*swap_token_memcg_from_mm(struct mm_struct
*mm
)
52 void grab_swap_token(struct mm_struct
*mm
)
55 unsigned int old_prio
= mm
->token_priority
;
56 static unsigned int global_faults
;
57 static unsigned int last_aging
;
61 current_interval
= global_faults
- mm
->faultstamp
;
63 if (!spin_trylock(&swap_token_lock
))
66 /* First come first served */
71 * Usually, we don't need priority aging because long interval faults
72 * makes priority decrease quickly. But there is one exception. If the
73 * token owner task is sleeping, it never make long interval faults.
74 * Thus, we need a priority aging mechanism instead. The requirements
75 * of priority aging are
76 * 1) An aging interval is reasonable enough long. Too short aging
77 * interval makes quick swap token lost and decrease performance.
78 * 2) The swap token owner task have to get priority aging even if
81 if ((global_faults
- last_aging
) > TOKEN_AGING_INTERVAL
) {
82 swap_token_mm
->token_priority
/= 2;
83 last_aging
= global_faults
;
86 if (mm
== swap_token_mm
) {
87 mm
->token_priority
+= 2;
91 if (current_interval
< mm
->last_interval
)
94 if (likely(mm
->token_priority
> 0))
98 /* Check if we deserve the token */
99 if (mm
->token_priority
> swap_token_mm
->token_priority
)
103 trace_update_swap_token_priority(mm
, old_prio
, swap_token_mm
);
106 mm
->faultstamp
= global_faults
;
107 mm
->last_interval
= current_interval
;
108 spin_unlock(&swap_token_lock
);
112 mm
->token_priority
+= 2;
113 trace_replace_swap_token(swap_token_mm
, mm
);
115 swap_token_memcg
= swap_token_memcg_from_mm(mm
);
116 last_aging
= global_faults
;
120 /* Called on process exit. */
121 void __put_swap_token(struct mm_struct
*mm
)
123 spin_lock(&swap_token_lock
);
124 if (likely(mm
== swap_token_mm
)) {
125 trace_put_swap_token(swap_token_mm
);
126 swap_token_mm
= NULL
;
127 swap_token_memcg
= NULL
;
129 spin_unlock(&swap_token_lock
);
/*
 * Identity match between two memcg cookies, NULL-tolerant:
 * NULL on either side means "global / unknown" and matches anything,
 * so global reclaim (memcg == NULL) always disables the token, and a
 * token grabbed with no memcg is disabled by any memcg's reclaim.
 */
static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
{
	if (!a)
		return true;
	if (!b)
		return true;
	if (a == b)
		return true;
	return false;
}
143 void disable_swap_token(struct mem_cgroup
*memcg
)
145 /* memcg reclaim don't disable unrelated mm token. */
146 if (match_memcg(memcg
, swap_token_memcg
)) {
147 spin_lock(&swap_token_lock
);
148 if (match_memcg(memcg
, swap_token_memcg
)) {
149 trace_disable_swap_token(swap_token_mm
);
150 swap_token_mm
= NULL
;
151 swap_token_memcg
= NULL
;
153 spin_unlock(&swap_token_lock
);