/*
 * mm/mmu_context.c
 *
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * See ../COPYING for licensing terms.
 */
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/export.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
14 * use_mm
15 * Makes the calling kernel thread take on the specified
16 * mm context.
17 * (Note: this routine is intended to be called only
18 * from a kernel thread context)
20 void use_mm(struct mm_struct *mm)
22 struct mm_struct *active_mm;
23 struct task_struct *tsk = current;
25 task_lock(tsk);
26 active_mm = tsk->active_mm;
27 if (active_mm != mm) {
28 atomic_inc(&mm->mm_count);
29 tsk->active_mm = mm;
31 tsk->mm = mm;
32 switch_mm(active_mm, mm, tsk);
33 task_unlock(tsk);
34 #ifdef finish_arch_post_lock_switch
35 finish_arch_post_lock_switch();
36 #endif
38 if (active_mm != mm)
39 mmdrop(active_mm);
41 EXPORT_SYMBOL_GPL(use_mm);
44 * unuse_mm
45 * Reverses the effect of use_mm, i.e. releases the
46 * specified mm context which was earlier taken on
47 * by the calling kernel thread
48 * (Note: this routine is intended to be called only
49 * from a kernel thread context)
51 void unuse_mm(struct mm_struct *mm)
53 struct task_struct *tsk = current;
55 task_lock(tsk);
56 sync_mm_rss(mm);
57 tsk->mm = NULL;
58 /* active_mm is still 'mm' */
59 enter_lazy_tlb(mm, tsk);
60 task_unlock(tsk);
62 EXPORT_SYMBOL_GPL(unuse_mm);