arch/x86/kernel/tlb_64.c  [linux-2.6/mini2440.git]
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>

#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
#include <asm/idle.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>

#include <mach_ipi.h>
/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	To avoid global state use 8 different call vectors.
 *	Each CPU uses a specific vector to trigger flushes on other
 *	CPUs. Depending on the received vector the target CPUs look into
 *	the right per cpu variable for the flush data.
 *
 *	With more than 8 CPUs they are hashed to the 8 available
 *	vectors. The limited global vector space forces us to this right now.
 *	In future when interrupts are split into per CPU domains this could be
 *	fixed, at the cost of triggering multiple IPIs in some cases.
 */
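/*
 * The "hash" is simply the sending CPU number modulo
 * NUM_INVALIDATE_TLB_VECTORS (see native_flush_tlb_others() below);
 * senders that map to the same vector serialize on that slot's
 * tlbstate_lock.
 */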
union smp_flush_state {
	struct {
		cpumask_t flush_cpumask;
		struct mm_struct *flush_mm;
		unsigned long flush_va;
		spinlock_t tlbstate_lock;
	};
	char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;
/* State is put into the per CPU data section, but padded
   to a full cache line because other CPUs can access it and we don't
   want false sharing in the per cpu data segment. */
static DEFINE_PER_CPU(union smp_flush_state, flush_state);
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */
/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */
asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	int cpu;
	int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &per_cpu(flush_state, sender);

	if (!cpu_isset(cpu, f->flush_cpumask))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */

	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	cpu_clear(cpu, f->flush_cpumask);
	add_pda(irq_tlb_count, 1);
}
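/*
 * Sender side of the flush IPI: publish the mm/va to flush in the
 * per-vector flush_state slot, IPI the target CPUs, and spin until
 * every target has cleared itself from flush_cpumask.
 */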
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;
	cpumask_t cpumask = *cpumaskp;

	if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
		return;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * Make the above memory operations globally visible before
	 * sending the IPI.
	 */
	smp_mb();
	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}
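/*
 * Boot-time setup: initialize the per-vector tlbstate_lock spinlocks
 * before any cross-CPU flush can happen (runs as a core_initcall).
 */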
static int __cpuinit init_smp_flush(void)
{
	int i;

	for_each_possible_cpu(i)
		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);

	return 0;
}
core_initcall(init_smp_flush);
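/*
 * Flush the entire TLB for the current task's mm: flush locally, then
 * ask every other CPU that has this mm loaded to do the same.
 */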
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
	preempt_enable();
}
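/*
 * Flush all TLB entries for an mm. If this CPU only has the mm as a
 * lazy (kernel-thread) context, leave the mm instead of flushing.
 */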
void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}
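/*
 * Flush a single user-space address from the TLB on every CPU that is
 * currently using the vma's mm.
 */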
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}
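/*
 * Per-CPU worker for flush_tlb_all(): flush everything locally and
 * drop out of lazy TLB mode if we were in it.
 */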
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (read_pda(mmu_state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}
void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}