#ifndef _ASM_X86_MMU_CONTEXT_64_H
#define _ASM_X86_MMU_CONTEXT_64_H

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	/*
	 * The CPU keeps running on the old page tables: mark it lazy,
	 * so the next flush IPI makes it leave the mm instead of
	 * flushing.
	 */
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
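
/*
 * Illustration (not part of the original header; simplified from
 * kernel/sched.c:context_switch() of this era): enter_lazy_tlb() is
 * called when the scheduler switches to a kernel thread, which has no
 * mm of its own and keeps running on the previous task's page tables:
 *
 *	if (unlikely(!next->mm)) {
 *		next->active_mm = prev->active_mm;
 *		atomic_inc(&prev->active_mm->mm_count);
 *		enter_lazy_tlb(prev->active_mm, next);
 *	} else
 *		switch_mm(prev->active_mm, next->mm, next);
 */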

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();
	if (likely(prev != next)) {
		/* Stop flush IPIs for the previous mm. */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		percpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpu_set(cpu, next->cpu_vm_mask);

		/* Re-load the page tables. */
		load_cr3(next->pgd);

		/* Load the LDT, but only if it differs from the previous mm's. */
		if (unlikely(next->context.ldt != prev->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery.  We must reload CR3
			 * to make sure we use no freed page tables.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}

/*
 * Drop the old image's user segment selectors.  %gs must be cleared
 * via load_gs_index(), which wraps the write in a swapgs pair so the
 * kernel's per-CPU GS base is preserved; %fs can be written directly.
 */
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	asm volatile("movl %0,%%fs"::"r"(0));	\
} while (0)
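
/*
 * For context (hedged, quoted for illustration from the shared
 * <asm/mmu_context.h> of this era): deactivate_mm() is reached via
 * mm_release() in kernel/fork.c when a task gives up its mm on exec
 * or exit, while the activate_mm() counterpart lives in the parent
 * header as switch_mm() with no task, plus a paravirt hook:
 *
 *	#define activate_mm(prev, next)			\
 *	do {						\
 *		paravirt_activate_mm((prev), (next));	\
 *		switch_mm((prev), (next), NULL);	\
 *	} while (0);
 */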

#endif /* _ASM_X86_MMU_CONTEXT_64_H */