static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
        unsigned cpu = smp_processor_id();

        /* Keep the current page tables loaded; TLBSTATE_LAZY tells the flush
         * IPI handler that this CPU may simply drop the mm (leave_mm) instead
         * of flushing its TLB. */
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
#endif
}
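Both helpers are invoked from the scheduler's context_switch(). The fragment below is a condensed sketch of that call site as it looks in kernel/sched.c of the same kernel generation (locking and surrounding bookkeeping omitted, so read it as orientation rather than a verbatim quote): a kernel thread has no mm of its own and lazily borrows the previous task's address space, while a normal task gets a real switch_mm().

        mm = next->mm;
        oldmm = prev->active_mm;
        if (unlikely(!mm)) {
                /* Kernel thread: keep running on the previous task's page
                 * tables and just mark this CPU as lazy. */
                next->active_mm = oldmm;
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
        } else
                switch_mm(oldmm, mm, next);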
static inline void switch_mm(struct mm_struct *prev,
                             struct mm_struct *next,
                             struct task_struct *tsk)
{
        int cpu = smp_processor_id();

        if (likely(prev != next)) {
                /* stop flush ipis for the previous mm */
                cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
                per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
                cpu_set(cpu, next->cpu_vm_mask);

                /* Re-load page tables */
                load_cr3(next->pgd);

                /*
                 * load the LDT, if the LDT is different:
                 */
                if (unlikely(prev->context.ldt != next->context.ldt))
                        load_LDT_nolock(&next->context);
        }
#ifdef CONFIG_SMP
        else {
                /* prev == next: the address space does not change; this CPU
                 * may have been running on it in lazy TLB mode. */
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
                BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

                if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
                        /* We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery. We must reload %cr3.
                         */
                        load_cr3(next->pgd);
                        load_LDT_nolock(&next->context);
                }
        }
#endif
}
#define deactivate_mm(tsk, mm)                  \
        asm("movl %0,%%gs": :"r" (0));