/*
 * Source: include/asm-um/mmu_context.h
 * (mikesnafu-overlay.git, committer Michael Beasley <mike@snafu.setup>,
 *  blob 6686fc524ca1aff467688ad52d8976f073a29ea5)
 */
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#ifndef __UM_MMU_CONTEXT_H
#define __UM_MMU_CONTEXT_H

#include "linux/sched.h"
#include "um_mmu.h"

extern void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
extern void arch_exit_mmap(struct mm_struct *mm);

#define get_mmu_context(task) do ; while(0)
#define activate_context(tsk) do ; while(0)

#define deactivate_mm(tsk,mm)	do { } while (0)

extern void force_flush_all(void);
22 static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
25 * This is called by fs/exec.c and fs/aio.c. In the first case, for an
26 * exec, we don't need to do anything as we're called from userspace
27 * and thus going to use a new host PID. In the second, we're called
28 * from a kernel thread, and thus need to go doing the mmap's on the
29 * host. Since they're very expensive, we want to avoid that as far as
30 * possible.
32 if (old != new && (current->flags & PF_BORROWED_MM))
33 __switch_mm(&new->context.id);
35 arch_dup_mmap(old, new);
38 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
39 struct task_struct *tsk)
41 unsigned cpu = smp_processor_id();
43 if(prev != next){
44 cpu_clear(cpu, prev->cpu_vm_mask);
45 cpu_set(cpu, next->cpu_vm_mask);
46 if(next != &init_mm)
47 __switch_mm(&next->context.id);
/*
 * Lazy-TLB hook: a no-op on UML (empty body restored — it was lost in
 * extraction). Both parameters are intentionally unused.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}
extern int init_new_context(struct task_struct *task, struct mm_struct *mm);

extern void destroy_context(struct mm_struct *mm);

#endif