#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#ifndef CONFIG_PARAVIRT
#include <asm-generic/mm_hooks.h>

/* Dummy hook: a no-op when paravirt support is compiled out. */
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
}
#endif  /* !CONFIG_PARAVIRT */

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
        unsigned cpu = smp_processor_id();

        /* A CPU about to run a kernel thread keeps the old mm mapped and
         * goes lazy, so flush IPIs for that mm can be skipped. */
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
#endif
}
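
The guard above is the whole of lazy-TLB entry: only a CPU whose TLB is
currently valid (TLBSTATE_OK) gets marked TLBSTATE_LAZY. As a minimal
user-space model of just that transition (the TLBSTATE_* values mirror the
kernel's names, but struct tlb_cpu and model_enter_lazy_tlb are invented
for illustration; this is a sketch, not kernel code):

#include <stdio.h>

enum { TLBSTATE_OK = 1, TLBSTATE_LAZY = 2 };

struct tlb_cpu {
        int state;      /* stands in for per_cpu(cpu_tlbstate, cpu).state */
};

static void model_enter_lazy_tlb(struct tlb_cpu *c)
{
        /* Same guard as enter_lazy_tlb() above: only a live mm goes lazy. */
        if (c->state == TLBSTATE_OK)
                c->state = TLBSTATE_LAZY;
}

int main(void)
{
        struct tlb_cpu cpu0 = { .state = TLBSTATE_OK };

        model_enter_lazy_tlb(&cpu0);
        printf("cpu0: %s\n", cpu0.state == TLBSTATE_LAZY ? "lazy" : "ok");
        return 0;
}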

static inline void switch_mm(struct mm_struct *prev,
                             struct mm_struct *next,
                             struct task_struct *tsk)
{
        int cpu = smp_processor_id();

        if (likely(prev != next)) {
                /* stop flush ipis for the previous mm */
                cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
                per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
                cpu_set(cpu, next->cpu_vm_mask);

                /* Re-load page tables */
                load_cr3(next->pgd);

                /*
                 * load the LDT, if the LDT is different:
                 */
                if (unlikely(prev->context.ldt != next->context.ldt))
                        load_LDT_nolock(&next->context);
        }
#ifdef CONFIG_SMP
        else {
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
                BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

                if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
                        /* We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery. We must reload %cr3.
                         */
                        load_cr3(next->pgd);
                        load_LDT_nolock(&next->context);
                }
        }
#endif
}

#define deactivate_mm(tsk, mm)                  \
        asm("movl %0,%%gs": :"r" (0));

#define activate_mm(prev, next)                 \
do {                                            \
        paravirt_activate_mm(prev, next);       \
        switch_mm((prev), (next), NULL);        \
} while (0);
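
The subtle part of switch_mm() is the prev == next branch. While this CPU
sat in lazy mode, the first flush IPI for the borrowed mm ran leave_mm(),
which (per the comment in the code) disables further IPI delivery by
dropping the CPU from cpu_vm_mask rather than flushing; when the CPU later
switches back to the same mm, cpu_test_and_set() reveals whether that
happened and hence whether %cr3 is stale. Below is a self-contained
user-space model of that handshake. All names (mm_model, cpu_model,
test_and_set_cpu, model_leave_mm, model_switch_back) are invented for
illustration, cpu_vm_mask is flattened to an unsigned long instead of a
cpumask_t, and the whole thing is a sketch of the logic, not kernel code:

#include <stdio.h>
#include <stdbool.h>

enum { TLBSTATE_OK = 1, TLBSTATE_LAZY = 2 };

struct mm_model {
        unsigned long cpu_vm_mask;      /* models mm_struct.cpu_vm_mask */
};

struct cpu_model {
        int state;                      /* models per_cpu(cpu_tlbstate).state */
        struct mm_model *active_mm;     /* models ...active_mm */
};

/* Models cpu_test_and_set(): return the old bit, then set it. */
static bool test_and_set_cpu(int cpu, struct mm_model *mm)
{
        bool old = mm->cpu_vm_mask & (1UL << cpu);
        mm->cpu_vm_mask |= 1UL << cpu;
        return old;
}

/* Models leave_mm(): the first flush IPI that hits a lazy CPU clears its
 * bit in cpu_vm_mask instead of flushing, so later IPIs skip it. */
static void model_leave_mm(int cpu, struct cpu_model *c)
{
        if (c->state == TLBSTATE_LAZY)
                c->active_mm->cpu_vm_mask &= ~(1UL << cpu);
}

/* Models the prev == next branch of switch_mm() above. */
static void model_switch_back(int cpu, struct cpu_model *c, struct mm_model *next)
{
        c->state = TLBSTATE_OK;
        if (!test_and_set_cpu(cpu, next))
                /* leave_mm() ran while we were lazy: the TLB may hold
                 * stale entries, so %cr3 (and the LDT) must be reloaded. */
                printf("cpu%d: reload cr3 + LDT\n", cpu);
        else
                printf("cpu%d: TLB still valid, nothing to do\n", cpu);
}

int main(void)
{
        struct mm_model mm = { .cpu_vm_mask = 1UL << 0 };
        struct cpu_model cpu0 = { .state = TLBSTATE_OK, .active_mm = &mm };

        cpu0.state = TLBSTATE_LAZY;     /* enter_lazy_tlb() */
        model_leave_mm(0, &cpu0);       /* a flush IPI arrives while lazy */
        model_switch_back(0, &cpu0, &mm);       /* back to the same mm */
        return 0;
}

Running this should print "cpu0: reload cr3 + LDT", i.e. the re-entry path
detects the missed flush; comment out the model_leave_mm() call and the bit
is still set, so the TLB is trusted as-is. That is the payoff of the lazy
scheme: kernel threads never force page-table switches or flush IPIs unless
a flush actually arrived while the mm was borrowed.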