/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *	(c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mach_apic.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
#include <asm/idle.h>

/*
 *	Smarter SMP flushing macros.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	To avoid global state use 8 different call vectors.
 *	Each CPU uses a specific vector to trigger flushes on other
 *	CPUs. Depending on the received vector the target CPUs look into
 *	the right per-cpu variable for the flush data.
 *
 *	With more than 8 CPUs they are hashed to the 8 available
 *	vectors. The limited global vector space forces us to this right now.
 *	In future, when interrupts are split into per-CPU domains, this could
 *	be fixed at the cost of triggering multiple IPIs in some cases.
 */
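
/*
 * Worked example (added for illustration, not part of the original
 * text): with NUM_INVALIDATE_TLB_VECTORS == 8, the sender below picks
 * its slot as smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS, so on a
 * 16-CPU box CPU 3 and CPU 11 both flush through vector
 * INVALIDATE_TLB_VECTOR_START + 3 and serialize on the same
 * per_cpu(flush_state, 3).tlbstate_lock.
 */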

union smp_flush_state {
	struct {
		cpumask_t flush_cpumask;
		struct mm_struct *flush_mm;
		unsigned long flush_va;
		spinlock_t tlbstate_lock;
	};
	char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;

/* State is put into the per CPU data section, but padded
   to a full cache line because other CPUs can access it and we don't
   want false sharing in the per cpu data segment. */
static DEFINE_PER_CPU(union smp_flush_state, flush_state);

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);
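
/*
 * Illustrative note (not from the original source): leave_mm() is the
 * lazy-TLB exit path.  A CPU in lazy TLB mode (e.g. a kernel thread
 * still borrowing the previous task's mm) gets a flush IPI, clears its
 * bit in cpu_vm_mask so no further flush IPIs are sent to it, and
 * reloads cr3 with swapper_pg_dir so no stale user mappings can be
 * speculatively loaded.
 */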

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	int cpu;
	int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_ax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &per_cpu(flush_state, sender);

	if (!cpu_isset(cpu, f->flush_cpumask))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */

	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	cpu_clear(cpu, f->flush_cpumask);
	add_pda(irq_tlb_count, 1);
}
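
/*
 * Handshake summary (added commentary): the sender ORs the target CPUs
 * into f->flush_cpumask and spins until it is empty again; each target
 * flushes (or leaves the mm) and then clears its own bit above, so the
 * cpu_clear() doubles as the acknowledgement that releases the sender.
 */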

void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;
	cpumask_t cpumask = *cpumaskp;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}

int __cpuinit init_smp_flush(void)
{
	int i;

	for_each_cpu_mask(i, cpu_possible_map) {
		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
	}
	return 0;
}
core_initcall(init_smp_flush);
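
/*
 * End-to-end sketch (illustrative, hypothetical CPU numbers): a task
 * on CPU 2 whose mm is also live on CPUs 5 and 9 calls flush_tlb_mm()
 * below; CPU 2 flushes locally, then
 * flush_tlb_others({5,9}, mm, TLB_FLUSH_ALL) raises vector
 * INVALIDATE_TLB_VECTOR_START + (2 % NUM_INVALIDATE_TLB_VECTORS) and
 * CPUs 5 and 9 finish the flush in smp_invalidate_interrupt().
 */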

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
	preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (read_pda(mmu_state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;

void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}
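
/*
 * Added note (based on how these helpers are used elsewhere): the CPU
 * bringup path takes call_lock through these wrappers so that a CPU's
 * appearance in cpu_online_map is serialized against an in-flight
 * smp_call_function(); the "Holding any lock stops cpus from going
 * down" comment below relies on the same lock.
 */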

/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */
static void __smp_call_function(void (*func) (void *info), void *info,
				int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (!cpus)
		return;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();	/* make the call data visible before raising the IPI */

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (!wait)
		return;

	while (atomic_read(&data.finished) != cpus)
		cpu_relax();
}

int native_smp_call_function_mask(cpumask_t mask,
				  void (*func)(void *), void *info,
				  int wait)
{
	struct call_data_struct data;
	cpumask_t allbutself;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* Holding any lock stops cpus from going down. */
	spin_lock(&call_lock);

	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);

	cpus_and(mask, mask, allbutself);
	cpus = cpus_weight(mask);

	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();	/* make the call data visible before raising the IPI */

	/* Send a message to other CPUs */
	if (cpus_equal(mask, allbutself))
		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();

	spin_unlock(&call_lock);

	return 0;
}
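
/*
 * Usage sketch (hypothetical callback name, assuming the usual
 * smp_call_function_mask() wrapper around this function): run
 * drain_local_counters() on every other online CPU and wait for all
 * of them to finish.  Interrupts must be enabled in the caller:
 *
 *	static void drain_local_counters(void *unused)
 *	{
 *		... per-CPU work, must not sleep ...
 *	}
 *
 *	smp_call_function_mask(cpu_online_map, drain_local_counters,
 *			       NULL, 1);
 */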

static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	disable_local_APIC();
	for (;;)
		halt();
}

void smp_send_stop(void)
{
	int nolock;
	unsigned long flags;

	if (reboot_force)
		return;

	/* Don't deadlock on the call lock in panic */
	nolock = !spin_trylock(&call_lock);
	local_irq_save(flags);
	__smp_call_function(stop_this_cpu, NULL, 0, 0);
	if (!nolock)
		spin_unlock(&call_lock);
	disable_local_APIC();
	local_irq_restore(flags);
}

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
asmlinkage void smp_reschedule_interrupt(void)
{
	ack_APIC_irq();
	add_pda(irq_resched_count, 1);
}

asmlinkage void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	exit_idle();
	irq_enter();
	(*func)(info);
	add_pda(irq_call_count, 1);
	irq_exit();
	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}

struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
	.smp_prepare_cpus = native_smp_prepare_cpus,
	.smp_cpus_done = native_smp_cpus_done,

	.smp_send_reschedule = native_smp_send_reschedule,
	.smp_call_function_mask = native_smp_call_function_mask,
	.cpu_up = native_cpu_up,
};
EXPORT_SYMBOL_GPL(smp_ops);
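
/*
 * Added note: generic entry points such as smp_send_reschedule() and
 * smp_call_function_mask() are expected to dispatch through this
 * smp_ops table, so a paravirtualized backend can replace the native_*
 * implementations registered above without touching their callers.
 */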