/*
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */
#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
static DEFINE_SPINLOCK(zone_scan_mutex);
/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of the task whose badness we should calculate
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum number of processes (one)
 * 5) we try to kill the process the user expects us to kill, this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
 */
unsigned long badness(struct task_struct *p, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time, s;
	struct mm_struct *mm;
	struct task_struct *child;
	task_lock(p);
	mm = p->mm;
	if (!mm) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = mm->total_vm;

	/*
	 * After this unlock we can no longer dereference local variable `mm'
	 */
	task_unlock(p);

	/*
	 * swapoff can easily use up all memory, so kill those first.
	 */
	if (p->flags & PF_SWAPOFF)
		return ULONG_MAX;
	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice. We add half the vmsize of the children if they
	 * have their own mm. This prevents forking servers from flooding
	 * the machine with an endless amount of children. In case a single
	 * child is eating the vast majority of memory, adding only half
	 * to the parent will make the child our kill candidate of choice.
	 */
	list_for_each_entry(child, &p->children, sibling) {
		task_lock(child);
		if (child->mm != mm && child->mm)
			points += child->mm->total_vm/2 + 1;
		task_unlock(child);
	}
	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds. There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
	cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime))
		>> (SHIFT_HZ + 3);

	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	s = int_sqrt(cpu_time);
	if (s)
		points /= s;
	s = int_sqrt(int_sqrt(run_time));
	if (s)
		points /= s;
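	/*
	 * The two divisions above damp the score: points is divided by the
	 * square root of cpu_time and by the fourth root of run_time, so
	 * tasks that have consumed a lot of CPU time or have been running
	 * for a long time score lower. This implements goal 1) above: we
	 * lose the minimum amount of work done.
	 */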
	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;
	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_ADMIN) ||
				p->uid == 0 || p->euid == 0)
		points /= 4;
	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_RAWIO))
		points /= 4;
	/*
	 * If p's nodes don't overlap ours, it may still help to kill p
	 * because p may have allocated or otherwise mapped memory on
	 * this node before. However it will be less likely.
	 */
	if (!cpuset_mems_allowed_intersects(current, p))
		points /= 8;
	/*
	 * Adjust the score by oomkilladj.
	 */
	if (p->oomkilladj) {
		if (p->oomkilladj > 0) {
			if (!points)
				points = 1;
			points <<= p->oomkilladj;
		} else
			points >>= -(p->oomkilladj);
	}
#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
		p->pid, p->comm, points);
#endif
	return points;
}
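/*
 * A rough worked example of the scoring above (the numbers are
 * hypothetical and assume HZ = 1000, i.e. SHIFT_HZ = 10): a task with
 * total_vm = 100000 pages that has used ~100 seconds of CPU time
 * (cpu_time = 100000 jiffies >> 13 = 12) and has been running for
 * ~3 hours (run_time = 10800 >> 10 = 10) scores
 *
 *	100000 / int_sqrt(12) / int_sqrt(int_sqrt(10))
 *		= 100000 / 3 / 1 = 33333 points
 *
 * before the nice, capability, cpuset and oomkilladj adjustments.
 * Memory size dominates; CPU and run time only damp the score.
 */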
/*
 * Determine the type of allocation constraint.
 */
static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
						    gfp_t gfp_mask)
{
#ifdef CONFIG_NUMA
	struct zone **z;
	nodemask_t nodes = node_states[N_HIGH_MEMORY];

	for (z = zonelist->zones; *z; z++)
		if (cpuset_zone_allowed_softwall(*z, gfp_mask))
			node_clear(zone_to_nid(*z), nodes);
		else
			return CONSTRAINT_CPUSET;

	if (!nodes_empty(nodes))
		return CONSTRAINT_MEMORY_POLICY;
#endif

	return CONSTRAINT_NONE;
}
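/*
 * In other words: if any zone in the zonelist is not allowed by our
 * cpuset, the allocation was cpuset-constrained; if every zone is
 * allowed but the zonelist still leaves out nodes that have memory,
 * the caller was restricted by a memory policy such as MPOL_BIND;
 * otherwise the OOM condition is system-wide (CONSTRAINT_NONE).
 */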
/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller will lock the tasklist.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned long *ppoints)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;
	*ppoints = 0;

	do_posix_clock_monotonic_gettime(&uptime);
	do_each_thread(g, p) {
		unsigned long points;

		/*
		 * skip kernel threads and tasks which have already released
		 * their mm.
		 */
		if (!p->mm)
			continue;
		/* skip the init task */
		if (is_global_init(p))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE))
			return ERR_PTR(-1UL);

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an easy OOM deadlock.
		 */
		if (p->flags & PF_EXITING) {
			if (p != current)
				return ERR_PTR(-1UL);

			chosen = p;
			*ppoints = ULONG_MAX;
		}

		if (p->oomkilladj == OOM_DISABLE)
			continue;

		points = badness(p, uptime.tv_sec);
		if (points > *ppoints || !chosen) {
			chosen = p;
			*ppoints = points;
		}
	} while_each_thread(g, p);

	return chosen;
}
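/*
 * The OOM_DISABLE test above is the kernel side of a userspace knob:
 * writing OOM_DISABLE (-17) to /proc/<pid>/oom_adj sets ->oomkilladj
 * and exempts the process from selection, e.g.
 *
 *	echo -17 > /proc/1234/oom_adj
 *
 * (illustrative shell usage; the pid is hypothetical)
 */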
/*
 * Send SIGKILL to the selected process irrespective of the CAP_SYS_RAW_IO
 * flag, though it's unlikely that we select a process with CAP_SYS_RAW_IO
 * set.
 */
static void __oom_kill_task(struct task_struct *p, int verbose)
{
	if (is_global_init(p)) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill init!\n");
		return;
	}

	if (!p->mm) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill an mm-less task!\n");
		return;
	}

	if (verbose)
		printk(KERN_ERR "Killed process %d (%s)\n",
				task_pid_nr(p), p->comm);

	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	p->rt.time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);

	force_sig(SIGKILL, p);
}
static int oom_kill_task(struct task_struct *p)
{
	struct mm_struct *mm;
	struct task_struct *g, *q;

	mm = p->mm;

	/* WARNING: mm may not be dereferenced since we did not obtain its
	 * value from get_task_mm(p).  This is OK since all we need to do is
	 * compare mm to q->mm below.
	 *
	 * Furthermore, even if mm contains a non-NULL value, p->mm may
	 * change to NULL at any time since we do not hold task_lock(p).
	 * However, this is of no concern to us.
	 */

	if (mm == NULL)
		return 1;

	/*
	 * Don't kill the process if any threads are set to OOM_DISABLE
	 */
	do_each_thread(g, q) {
		if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
			return 1;
	} while_each_thread(g, q);

	__oom_kill_task(p, 1);

	/*
	 * kill all processes that share the ->mm (i.e. all threads),
	 * but are in a different thread group. Don't let them have access
	 * to memory reserves though, otherwise we might deplete all memory.
	 */
	do_each_thread(g, q) {
		if (q->mm == mm && !same_thread_group(q, p))
			force_sig(SIGKILL, q);
	} while_each_thread(g, q);

	return 0;
}
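/*
 * Note on the return value: oom_kill_task() returns 0 once a kill has
 * actually been issued and nonzero when the task could not be killed
 * (no mm, or a thread marked OOM_DISABLE). oom_kill_process() below
 * uses this to fall back from a child to the parent.
 */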
static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			    unsigned long points, const char *message)
{
	struct task_struct *c;

	if (printk_ratelimit()) {
		printk(KERN_WARNING "%s invoked oom-killer: "
			"gfp_mask=0x%x, order=%d, oomkilladj=%d\n",
			current->comm, gfp_mask, order, current->oomkilladj);
		dump_stack();
		show_mem();
	}

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		__oom_kill_task(p, 0);
		return 0;
	}

	printk(KERN_ERR "%s: kill process %d (%s) score %lu or a child\n",
					message, task_pid_nr(p), p->comm, points);

	/* Try to kill a child first */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == p->mm)
			continue;
		if (!oom_kill_task(c))
			return 0;
	}
	return oom_kill_task(p);
}
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
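/*
 * Sketch of a client of this notifier chain (hypothetical module code,
 * not part of this file). out_of_memory() below invokes the chain with
 * a pointer to an unsigned long; a callback that manages to free memory
 * reports the number of freed pages by adding to it, which aborts the
 * kill:
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long dummy, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *		*freed += shrink_my_private_cache();	// hypothetical helper
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */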
/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zone_oom(struct zonelist *zonelist)
{
	struct zone **z;
	int ret = 1;

	z = zonelist->zones;

	spin_lock(&zone_scan_mutex);
	do {
		if (zone_is_oom_locked(*z)) {
			ret = 0;
			goto out;
		}
	} while (*(++z) != NULL);

	/*
	 * Lock each zone in the zonelist under zone_scan_mutex so a parallel
	 * invocation of try_set_zone_oom() doesn't succeed when it shouldn't.
	 */
	z = zonelist->zones;
	do {
		zone_set_flag(*z, ZONE_OOM_LOCKED);
	} while (*(++z) != NULL);
out:
	spin_unlock(&zone_scan_mutex);
	return ret;
}
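/*
 * Sketch of the expected caller pattern (assuming the page allocator is
 * the user, as the header comment says): take the per-zonelist OOM lock
 * before invoking the killer and drop it afterwards, e.g.
 *
 *	if (try_set_zone_oom(zonelist)) {
 *		out_of_memory(zonelist, gfp_mask, order);
 *		clear_zonelist_oom(zonelist);
 *	}
 */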
/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now recall the OOM
 * killer, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist)
{
	struct zone **z;

	z = zonelist->zones;

	spin_lock(&zone_scan_mutex);
	do {
		zone_clear_flag(*z, ZONE_OOM_LOCKED);
	} while (*(++z) != NULL);
	spin_unlock(&zone_scan_mutex);
}
/**
 * out_of_memory - kill the "best" process when we run out of memory
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
{
	struct task_struct *p;
	unsigned long points = 0;
	unsigned long freed = 0;
	enum oom_constraint constraint;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	if (sysctl_panic_on_oom == 2)
		panic("out of memory. Compulsory panic_on_oom is selected.\n");

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(zonelist, gfp_mask);
	read_lock(&tasklist_lock);

	switch (constraint) {
	case CONSTRAINT_MEMORY_POLICY:
		oom_kill_process(current, gfp_mask, order, points,
				"No available memory (MPOL_BIND)");
		break;

	case CONSTRAINT_NONE:
		if (sysctl_panic_on_oom)
			panic("out of memory. panic_on_oom is selected\n");
		/* Fall-through */
	case CONSTRAINT_CPUSET:
		if (sysctl_oom_kill_allocating_task) {
			oom_kill_process(current, gfp_mask, order, points,
					"Out of memory (oom_kill_allocating_task)");
			break;
		}
retry:
		/*
		 * Rambo mode: Shoot down a process and hope it solves whatever
		 * issues we may have.
		 */
		p = select_bad_process(&points);

		if (PTR_ERR(p) == -1UL)
			goto out;

		/* Found nothing?!?! Either we hang forever, or we panic. */
		if (!p) {
			read_unlock(&tasklist_lock);
			panic("Out of memory and no killable processes...\n");
		}

		if (oom_kill_process(p, gfp_mask, order, points,
				     "Out of memory"))
			goto retry;

		break;
	}

out:
	read_unlock(&tasklist_lock);

	/*
	 * Give "p" a good chance of killing itself before we
	 * retry to allocate memory unless "p" is current
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}
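/*
 * The two knobs declared at the top of this file are exposed through
 * /proc/sys/vm (the sysctl table itself lives in kernel/sysctl.c).
 * Illustrative shell usage, matching the behavior implemented above:
 *
 *	echo 1 > /proc/sys/vm/panic_on_oom		  (panic on unconstrained OOM)
 *	echo 2 > /proc/sys/vm/panic_on_oom		  (panic even on constrained OOM)
 *	echo 1 > /proc/sys/vm/oom_kill_allocating_task	  (kill current, skip the scan)
 */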