/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * This code is released under the GNU public license version 2 or
 * later.
 */

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>

#include <asm/pgalloc.h>

/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *	None of the E1AP-E3AP errata are visible to the user.
 *
 * Pentium II / [Xeon]
 *	None of the A1AP-A3AP errata are visible to the user.
 *
 * Pentium Pro
 *	None of the 1AP-9AP errata are visible to the normal user,
 *	except occasional delivery of 'spurious interrupt' as trap #15.
 *	This is very rare and a non-problem.
 *
 * 1AP.	Linux maps the APIC as non-cacheable
 * 2AP.	worked around in hardware
 * 3AP.	fixed in C0 and above steppings by microcode update.
 *	Linux does not use excessive STARTUP_IPIs.
 * 4AP.	worked around in hardware
 * 5AP.	symmetric IO mode (normal Linux operation) not affected.
 *	'noapic' mode has vector 0xf filled out properly.
 * 6AP.	'noapic' mode might be affected - fixed in later steppings
 * 7AP.	We do not assume writes to the LVT deasserting IRQs
 * 8AP.	We do not enable low power mode (deep sleep) during MP bootup
 * 9AP.	We do not use mixed mode
 *
 * Pentium
 *	There is a marginal case where REP MOVS on 100MHz SMP
 *	machines with B stepping processors can fail. XXX should provide
 *	an L1cache=Writethrough or L1cache=off option.
 *
 *	B stepping CPUs may hang. There are hardware workarounds
 *	for this. We warn about it in case your board doesn't have the
 *	workarounds. Basically that's so I can tell anyone with a B stepping
 *	CPU and SMP problems "tough".
 *
 * Specific items [From Pentium Processor Specification Update]
 *
 * 1AP.	Linux doesn't use remote read
 * 2AP.	Linux doesn't trust APIC errors
 * 3AP.	We work around this
 * 4AP.	Linux never generates 3 interrupts of the same priority
 *	to cause a lost local interrupt.
 * 5AP.	Remote read is never used
 * 6AP.	not affected - worked around in hardware
 * 7AP.	not affected - worked around in hardware
 * 8AP.	worked around in hardware - we get explicit CS errors if not
 * 9AP.	only 'noapic' mode affected. Might generate spurious
 *	interrupts, we log only the first one and count the
 *	rest silently.
 * 10AP.	not affected - worked around in hardware
 * 11AP.	Linux reads the APIC between writes to avoid this, as per
 *	the documentation. Make sure you preserve this as it affects
 *	the C stepping chips too.
 * 12AP.	not affected - worked around in hardware
 * 13AP.	not affected - worked around in hardware
 * 14AP.	we always deassert INIT during bootup
 * 15AP.	not affected - worked around in hardware
 * 16AP.	not affected - worked around in hardware
 * 17AP.	not affected - worked around in hardware
 * 18AP.	not affected - worked around in hardware
 * 19AP.	not affected - worked around in BIOS
 *
 * If this sounds worrying, believe me these bugs are either ___RARE___
 * or are signal timing bugs worked around in hardware, and there is
 * essentially nothing of note from the C stepping upwards.
 */

/* The 'big kernel lock' */
spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;

struct tlb_state cpu_tlbstate[NR_CPUS] = {[0 ... NR_CPUS-1] = { &init_mm, 0 }};

/*
 * the following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */

static inline int __prepare_ICR (unsigned int shortcut, int vector)
{
	return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL;
}

static inline int __prepare_ICR2 (unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}

static inline void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read, we use an atomic rmw access to avoid costly
	 * cli/sti. Otherwise we use an even cheaper single atomic write
	 * to the APIC.
	 */
	unsigned int cfg;

	apic_wait_icr_idle();

	/*
	 * No need to touch the target chip field
	 */
	cfg = __prepare_ICR(shortcut, vector);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);
}

static inline void send_IPI_allbutself(int vector)
{
	/*
	 * If there are no other CPUs in the system then we get an
	 * APIC send error if we try to broadcast, thus we have to
	 * avoid sending IPIs in this case.
	 */
	if (smp_num_cpus > 1)
		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

static inline void send_IPI_all(int vector)
{
	__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void send_IPI_self(int vector)
{
	__send_IPI_shortcut(APIC_DEST_SELF, vector);
}

static inline void send_IPI_mask(int mask, int vector)
{
	unsigned long cfg;
	unsigned long flags;

	__save_flags(flags);
	__cli();

	apic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, cfg);

	cfg = __prepare_ICR(0, vector);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);

	__restore_flags(flags);
}

/*
 * Smarter SMP flushing macros.
 *	c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 */

static volatile unsigned long flush_cpumask;
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
#define FLUSH_ALL	0xffffffff

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead we update mm->cpu_vm_mask.
 */
static void inline leave_mm (unsigned long cpu)
{
	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
		BUG();	/* leave_mm is only valid in lazy TLB mode */
	clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
}

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, so in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK
 *	Now smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu_tlbstate[].active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1b) thread switch without mm change
 *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the old bit value.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are
 * no write/read ordering problems.
 */
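
/*
 * Illustrative sketch only, not compiled as part of this file: roughly how
 * a switch_mm() implementation could follow the ordering described above
 * for cases 1a and 1b. The function name, the prev/next/cpu identifiers and
 * the cr3 comment are assumptions for the example; the real implementation
 * lives in the mmu_context headers.
 */
#if 0
static void example_switch_mm_order(struct mm_struct *prev,
				    struct mm_struct *next, unsigned long cpu)
{
	if (prev != next) {
		/* 1a1) stop flush ipi delivery for the old mm */
		clear_bit(cpu, &prev->cpu_vm_mask);
		/* 1a2) + 1a3) accept tlb flushes for the new mm */
		cpu_tlbstate[cpu].state = TLBSTATE_OK;
		cpu_tlbstate[cpu].active_mm = next;
		/* 1a4) other cpus now start sending flush ipis for next */
		set_bit(cpu, &next->cpu_vm_mask);
		/* ... and finally cr3 would be loaded for the new mm here */
	} else {
		/* 1b1) + 1b2) re-arm flush ipis for the same mm */
		cpu_tlbstate[cpu].state = TLBSTATE_OK;
		if (!test_and_set_bit(cpu, &next->cpu_vm_mask))
			/* 1b3) bit was 0: leave_mm ran while we were lazy */
			local_flush_tlb();
	}
}
#endif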

/*
 * The flush IPI handler does two things:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in lazy tlb mode.
 */

asmlinkage void smp_invalidate_interrupt (void)
{
	unsigned long cpu = smp_processor_id();

	if (!test_bit(cpu, &flush_cpumask))
		return;

	if (flush_mm == cpu_tlbstate[cpu].active_mm) {
		if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	clear_bit(cpu, &flush_cpumask);
}

static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
						unsigned long va)
{
	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	if ((cpumask & cpu_online_map) != cpumask)
		BUG();
	if (cpumask & (1 << smp_processor_id()))
		BUG();

	/*
	 * I'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
	atomic_set_mask(cpumask, &flush_cpumask);

	/*
	 * We have to send the IPI only to the CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

	while (flush_cpumask)
		/* nothing. lockup detection does not belong here */;

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());

	local_flush_tlb();
	if (cpu_mask)
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
}

void flush_tlb_mm (struct mm_struct * mm)
{
	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpu_mask)
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
}

void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}
	if (cpu_mask)
		flush_tlb_others(cpu_mask, mm, va);
}

static inline void do_flush_tlb_all_local(void)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
		leave_mm(cpu);
}

static void flush_tlb_all_ipi(void* info)
{
	do_flush_tlb_all_local();
}

void flush_tlb_all(void)
{
	smp_call_function(flush_tlb_all_ipi, 0, 1, 1);

	do_flush_tlb_all_local();
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(1 << cpu, RESCHEDULE_VECTOR);
}
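
/*
 * Illustrative usage sketch, not part of this file: a caller that has just
 * marked a remote CPU's current task for rescheduling kicks that CPU with
 * smp_send_reschedule(). The helper name and the task_struct fields used
 * here are assumptions for the example only.
 */
#if 0
static void example_kick_remote_cpu(struct task_struct *p)
{
	p->need_resched = 1;		/* assumed field layout */
	smp_send_reschedule(p->processor);
}
#endif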

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct * call_data;

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until the function has completed on the
 * other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <func> or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. You may call it from a bottom half handler.
 */
{
	struct call_data_struct data;
	int cpus = smp_num_cpus - 1;

	if (!cpus)
		return 0;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);
	call_data = &data;
	wmb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	spin_unlock_bh(&call_lock);

	return 0;
}
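
/*
 * Illustrative usage sketch, not part of this file: running a fast,
 * non-blocking function on every other CPU and waiting for completion,
 * in the same way flush_tlb_all() does above. The example function and
 * counter names are assumptions for the sketch.
 */
#if 0
static atomic_t example_hits = ATOMIC_INIT(0);

static void example_count_cpu(void *info)
{
	atomic_inc(&example_hits);	/* must be fast and non-blocking */
}

static void example_run_on_all_cpus(void)
{
	/* run locally first ... */
	example_count_cpu(NULL);
	/* ... then on all other CPUs, waiting until they have finished;
	 * must not be called with interrupts disabled or from a hardware
	 * interrupt handler. */
	if (smp_call_function(example_count_cpu, NULL, 1, 1))
		printk("example: smp_call_function failed\n");
}
#endif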

static void stop_this_cpu (void * dummy)
{
	/*
	 * Remove this CPU from the online map and halt it:
	 */
	clear_bit(smp_processor_id(), &cpu_online_map);
	__cli();
	disable_local_APIC();
	if (cpu_data[smp_processor_id()].hlt_works_ok)
		for(;;) __asm__("hlt");
	for (;;)
		;
}

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);

	disable_local_APIC();
}

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
asmlinkage void smp_reschedule_interrupt(void)
{
	ack_APIC_irq();
}

asmlinkage void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	(*func)(info);

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}