#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/system.h>
/*
 * TLB-flush needs to be nonpreemptible on PREEMPT_RT due to the
 * following complex race scenario:
 *
 * if the current task is lazy-TLB and does a TLB flush and
 * gets preempted after the movl %%cr3, %0 but before the
 * movl %0, %%cr3 then its ->active_mm might change and it will
 * install the wrong cr3 when it switches back. This is not a
 * problem for the lazy-TLB task itself, but if the next task it
 * switches to has an ->mm that is also the lazy-TLB task's
 * new ->active_mm, then the scheduler will assume that cr3 is
 * the new one, while we overwrote it with the old one. The result
 * is the wrong cr3 in the new (non-lazy-TLB) task, which typically
 * causes an infinite pagefault upon the next userspace access.
 */
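/*
 * Illustrative interleaving of the race above (a sketch, not code
 * from this file; the task and preemption points are hypothetical):
 *
 *	lazy-TLB task:	movl %%cr3, %0		// read current cr3
 *	  -> preempted; ->active_mm changes underneath us
 *	lazy-TLB task:	movl %0, %%cr3		// reinstall stale cr3
 *	  -> a task whose ->mm equals the new ->active_mm now runs
 *	     with the old cr3 and faults endlessly on user accesses.
 */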
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif
static inline void __native_flush_tlb(void)
{
	/* Reloading cr3 flushes all non-global TLB entries: */
	write_cr3(read_cr3());
}
static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;
	unsigned long cr4;

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = read_cr4();
	/* clear PGE */
	write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	write_cr4(cr4);

	raw_local_irq_restore(flags);
}
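/*
 * Note: a plain cr3 reload (__native_flush_tlb() above) leaves TLB
 * entries for global pages (PTE.G set) in place; toggling CR4.PGE
 * off and back on is the architectural way to flush those as well,
 * which is why the global flush is a separate primitive.
 */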
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}
static inline void __flush_tlb_one(unsigned long addr)
{
	if (cpu_has_invlpg)
		__flush_tlb_single(addr);
	else
		__flush_tlb();
}
#ifdef CONFIG_X86_32
# define TLB_FLUSH_ALL	0xffffffff
#else
# define TLB_FLUSH_ALL	-1ULL
#endif
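/*
 * TLB_FLUSH_ALL is a sentinel 'va' value - an address no valid
 * single-page flush would target - meaning "flush the whole address
 * space", e.g. flush_tlb_others(mask, mm, TLB_FLUSH_ALL).
 */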
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
 *
 * (A usage sketch follows this comment.)
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 *
 * x86-64 can only flush individual pages or full VMs. For a range
 * flush we always do the full VM. It might be worth checking whether
 * a few INVLPGs in a row are a win for small ranges.
 */
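/*
 * Hypothetical usage sketch (illustrative only, not part of this
 * header): after changing a user PTE, the stale translation for that
 * address is shot down:
 *
 *	set_pte_at(mm, addr, ptep, pte);	// update the page table
 *	flush_tlb_page(vma, addr);		// evict the stale TLB entry
 */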
#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * This is safe on PREEMPT_RT because if we preempt
	 * right after the check but before the __flush_tlb(),
	 * and if ->active_mm changes, then we might miss a
	 * TLB flush, but that TLB flush happened already when
	 * ->active_mm was changed:
	 */
	if (mm == current->active_mm)
		__flush_tlb();
}
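/*
 * Timeline sketch of the benign case described above (hypothetical
 * interleaving):
 *
 *	if (mm == current->active_mm)	// true at check time
 *	  -> preempted; ->active_mm moves away from mm, and that
 *	     very switch already reloaded cr3, i.e. flushed for mm
 *	__flush_tlb();			// flushes the new mm:
 *					// redundant but harmless
 */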
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}
static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long va)
{
}

static inline void reset_lazy_tlbstate(void)
{
}
#else	/* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()
extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()	flush_tlb_current_task()
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long va);
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

struct tlb_state {
	struct mm_struct *active_mm;
	int state;
};
DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
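/*
 * Summary of the usual lazy-TLB protocol (an assumption about the
 * users of this state, not something defined in this header): a cpu
 * is TLBSTATE_OK while actively running on 'active_mm' and becomes
 * TLBSTATE_LAZY once it switches to a kernel thread and merely
 * borrows the mm; the shootdown IPI path can then let lazy cpus
 * drop the mm instead of flushing them.
 */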
static inline void reset_lazy_tlbstate(void)
{
	percpu_write(cpu_tlbstate.state, 0);
	percpu_write(cpu_tlbstate.active_mm, &init_mm);
}
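/*
 * reset_lazy_tlbstate() returns this cpu's bookkeeping to its boot
 * values (no mm in use, init_mm borrowed); a natural caller would be
 * cpu bringup/hotplug code, so a cpu never comes online with stale
 * lazy-TLB state.
 */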
#endif	/* SMP */

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(mask, mm, va)
#endif
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	/* No ranged kernel flush primitive here, so flush everything: */
	flush_tlb_all();
}
extern void zap_low_mappings(void);

#endif /* _ASM_X86_TLBFLUSH_H */