x86, MAINTAINERS: Add x86 MCE people
[linux-2.6/libata-dev.git] / mm / thrash.c
blobfabf2d0f51695eddacdb6b013fac0b9cdee3a655
/*
 * mm/thrash.c
 *
 * Copyright (C) 2004, Red Hat, Inc.
 * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
 * Released under the GPL, see the file COPYING for details.
 *
 * Simple token based thrashing protection, using the algorithm
 * described in: http://www.cs.wm.edu/~sjiang/token.pdf
 *
 * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
 * Improved algorithm to pass token:
 * Each task has a priority which is incremented if it contended
 * for the token in an interval less than its previous attempt.
 * If the token is acquired, that task's priority is boosted to prevent
 * the token from bouncing around too often and to let the task make
 * some progress in its execution.
 */
20 #include <linux/jiffies.h>
21 #include <linux/mm.h>
22 #include <linux/sched.h>
23 #include <linux/swap.h>
24 #include <linux/memcontrol.h>
26 #include <trace/events/vmscan.h>
28 #define TOKEN_AGING_INTERVAL (0xFF)
30 static DEFINE_SPINLOCK(swap_token_lock);
31 struct mm_struct *swap_token_mm;
32 struct mem_cgroup *swap_token_memcg;
33 static unsigned int global_faults;
34 static unsigned int last_aging;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * Look up the memory cgroup that @mm belongs to, without keeping a
 * reference: the css reference taken by try_get_mem_cgroup_from_mm()
 * is dropped immediately, so the returned pointer is only usable for
 * identity comparison (see match_memcg()), never for dereferencing.
 * Returns NULL when no memcg could be resolved.
 */
static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	memcg = try_get_mem_cgroup_from_mm(mm);
	if (memcg)
		css_put(mem_cgroup_css(memcg));

	return memcg;
}
#else
/* Without memcg support there is nothing to match against. */
static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
{
	return NULL;
}
#endif
54 void grab_swap_token(struct mm_struct *mm)
56 int current_interval;
57 unsigned int old_prio = mm->token_priority;
59 global_faults++;
61 current_interval = global_faults - mm->faultstamp;
63 if (!spin_trylock(&swap_token_lock))
64 return;
66 /* First come first served */
67 if (!swap_token_mm)
68 goto replace_token;
70 if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
71 swap_token_mm->token_priority /= 2;
72 last_aging = global_faults;
75 if (mm == swap_token_mm) {
76 mm->token_priority += 2;
77 goto update_priority;
80 if (current_interval < mm->last_interval)
81 mm->token_priority++;
82 else {
83 if (likely(mm->token_priority > 0))
84 mm->token_priority--;
87 /* Check if we deserve the token */
88 if (mm->token_priority > swap_token_mm->token_priority)
89 goto replace_token;
91 update_priority:
92 trace_update_swap_token_priority(mm, old_prio, swap_token_mm);
94 out:
95 mm->faultstamp = global_faults;
96 mm->last_interval = current_interval;
97 spin_unlock(&swap_token_lock);
98 return;
100 replace_token:
101 mm->token_priority += 2;
102 trace_replace_swap_token(swap_token_mm, mm);
103 swap_token_mm = mm;
104 swap_token_memcg = swap_token_memcg_from_mm(mm);
105 last_aging = global_faults;
106 goto out;
109 /* Called on process exit. */
110 void __put_swap_token(struct mm_struct *mm)
112 spin_lock(&swap_token_lock);
113 if (likely(mm == swap_token_mm)) {
114 trace_put_swap_token(swap_token_mm);
115 swap_token_mm = NULL;
116 swap_token_memcg = NULL;
118 spin_unlock(&swap_token_lock);
/*
 * Two memcgs "match" when either side is NULL (global reclaim / unknown
 * owner matches everything) or both refer to the same cgroup.  Pure
 * pointer-identity comparison; neither argument is dereferenced.
 */
static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
{
	return !a || !b || a == b;
}
132 void disable_swap_token(struct mem_cgroup *memcg)
134 /* memcg reclaim don't disable unrelated mm token. */
135 if (match_memcg(memcg, swap_token_memcg)) {
136 spin_lock(&swap_token_lock);
137 if (match_memcg(memcg, swap_token_memcg)) {
138 trace_disable_swap_token(swap_token_mm);
139 swap_token_mm = NULL;
140 swap_token_memcg = NULL;
142 spin_unlock(&swap_token_lock);