x86-64: Move TLB state from PDA to per-cpu and consolidate with 32-bit.
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / arch/x86/include/asm/mmu_context_64.h
blob c4572505ab3e06eaee71d2a88d7bd84869c8f4ab
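
For context, the per-cpu TLB state this header reads and writes replaces the
old PDA fields and is now shared with 32-bit. A sketch of the consolidated
declaration (based on asm/tlbflush.h of this era; treat the exact layout as
illustrative):

	#define TLBSTATE_OK	1	/* CPU takes part in flush IPIs for its mm  */
	#define TLBSTATE_LAZY	2	/* lazy TLB: CPU may opt out via leave_mm() */

	struct tlb_state {
		struct mm_struct *active_mm;	/* mm whose page tables are loaded */
		int state;
	};
	DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
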
#ifndef _ASM_X86_MMU_CONTEXT_64_H
#define _ASM_X86_MMU_CONTEXT_64_H

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	/* Mark this CPU lazy: flush IPIs for mm may now be skipped. */
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
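
enter_lazy_tlb() is called by the scheduler when it switches to a kernel
thread, which has no mm of its own and keeps running on the previous task's
page tables. A simplified, assumed shape of that call site (modeled on
context_switch() in kernel/sched.c):

	if (unlikely(!next->mm)) {			/* kernel thread */
		next->active_mm = prev->active_mm;	/* borrow the old mm */
		atomic_inc(&prev->active_mm->mm_count);
		enter_lazy_tlb(prev->active_mm, next);	/* mark this CPU lazy */
	} else
		switch_mm(prev->active_mm, next->mm, next);
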
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		percpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpu_set(cpu, next->cpu_vm_mask);
		load_cr3(next->pgd);

		if (unlikely(next->context.ldt != prev->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}
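
The comment above refers to leave_mm(), the other half of the lazy-TLB
protocol: when a flush IPI catches a CPU in TLBSTATE_LAZY, leave_mm() drops
the CPU from the mm's cpu_vm_mask and switches to the kernel page tables, so
no further flush IPIs are sent to it. A sketch of the consolidated version
(modeled on arch/x86/mm/tlb.c; details assumed):

	void leave_mm(int cpu)
	{
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
			BUG();
		cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
		load_cr3(swapper_pg_dir);	/* stop using the old mm's page tables */
	}

This is why switch_mm() must reload CR3 when cpu_test_and_set() finds the CPU
missing from next->cpu_vm_mask: the page tables may have changed, and pages
been freed, while flush IPIs were disabled for this CPU.
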
#define deactivate_mm(tsk, mm)				\
do {							\
	/* Clear %gs and %fs so stale selectors do	\
	 * not leak into the new mm. */			\
	load_gs_index(0);				\
	asm volatile("movl %0,%%fs"::"r"(0));		\
} while (0)

#endif /* _ASM_X86_MMU_CONTEXT_64_H */
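
This 64-bit variant is pulled in via asm/mmu_context.h, which dispatches on
word size along these lines (assumed, matching the file layout of this era):

	#ifdef CONFIG_X86_32
	# include "mmu_context_32.h"
	#else
	# include "mmu_context_64.h"
	#endif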