/*
 *      Intel SMP support routines.
 *
 *      (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *      (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *      This code is released under the GNU public license version 2 or
 *      later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>

#include <asm/mtrr.h>
#include <asm/pgalloc.h>
/*
 *      Some notes on x86 processor bugs affecting SMP operation:
 *
 *      Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 *      The Linux implications for SMP are handled as follows:
 *
 *      Pentium III / [Xeon]
 *              None of the E1AP-E3AP errata are visible to the user.
 *
 *      E1AP.   see PII A1AP
 *      E2AP.   see PII A2AP
 *      E3AP.   see PII A3AP
 *
 *      Pentium II / [Xeon]
 *              None of the A1AP-A3AP errata are visible to the user.
 *
 *      A1AP.   see PPro 1AP
 *      A2AP.   see PPro 2AP
 *      A3AP.   see PPro 7AP
 *
 *      Pentium Pro
 *              None of the 1AP-9AP errata are visible to the normal user,
 *      except occasional delivery of 'spurious interrupt' as trap #15.
 *      This is very rare and a non-problem.
 *
 *      1AP.    Linux maps APIC as non-cacheable
 *      2AP.    worked around in hardware
 *      3AP.    fixed in C0 and above steppings microcode update.
 *              Linux does not use excessive STARTUP_IPIs.
 *      4AP.    worked around in hardware
 *      5AP.    symmetric IO mode (normal Linux operation) not affected.
 *              'noapic' mode has vector 0xf filled out properly.
 *      6AP.    'noapic' mode might be affected - fixed in later steppings
 *      7AP.    We do not assume writes to the LVT deasserting IRQs
 *      8AP.    We do not enable low power mode (deep sleep) during MP bootup
 *      9AP.    We do not use mixed mode
 *
 *      Pentium
 *              There is a marginal case where REP MOVS on 100MHz SMP
 *      machines with B stepping processors can fail. XXX should provide
 *      an L1cache=Writethrough or L1cache=off option.
 *
 *              B stepping CPUs may hang. There are hardware workarounds
 *      for this. We warn about it in case your board doesn't have the
 *      workarounds. Basically that's so I can tell anyone with a B stepping
 *      CPU and SMP problems "tough".
 *
 *      Specific items [From Pentium Processor Specification Update]
 *
 *      1AP.    Linux doesn't use remote read
 *      2AP.    Linux doesn't trust APIC errors
 *      3AP.    We work around this
 *      4AP.    Linux never generates 3 interrupts of the same priority
 *              to cause a lost local interrupt.
 *      5AP.    Remote read is never used
 *      6AP.    not affected - worked around in hardware
 *      7AP.    not affected - worked around in hardware
 *      8AP.    worked around in hardware - we get explicit CS errors if not
 *      9AP.    only 'noapic' mode affected. Might generate spurious
 *              interrupts, we log only the first one and count the
 *              rest silently.
 *      10AP.   not affected - worked around in hardware
 *      11AP.   Linux reads the APIC between writes to avoid this, as per
 *              the documentation. Make sure you preserve this as it affects
 *              the C stepping chips too.
 *      12AP.   not affected - worked around in hardware
 *      13AP.   not affected - worked around in hardware
 *      14AP.   we always deassert INIT during bootup
 *      15AP.   not affected - worked around in hardware
 *      16AP.   not affected - worked around in hardware
 *      17AP.   not affected - worked around in hardware
 *      18AP.   not affected - worked around in hardware
 *      19AP.   not affected - worked around in BIOS
 *
 *      If this sounds worrying, believe me these bugs are either ___RARE___,
 *      or are signal timing bugs worked around in hardware, and there's
 *      essentially nothing of note from the C stepping upwards.
 */
/* The 'big kernel lock' */
spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;

struct tlb_state cpu_tlbstate[NR_CPUS] = {[0 ... NR_CPUS-1] = { &init_mm, 0 }};

/*
 * the following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */
static inline int __prepare_ICR (unsigned int shortcut, int vector)
{
        return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL;
}

static inline int __prepare_ICR2 (unsigned int mask)
{
        return SET_APIC_DEST_FIELD(mask);
}
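/*
 * Worked example (illustrative only; the exact bit layout comes from
 * <asm/apicdef.h>): to send, say, vector 0xfd to the CPUs in logical
 * destination mask 0x03, send_IPI_mask() below writes
 * __prepare_ICR2(0x03) - the mask placed in the ICR2 destination
 * field - to APIC_ICR2, and then __prepare_ICR(0, 0xfd), i.e.
 * APIC_DM_FIXED|APIC_DEST_LOGICAL|0xfd, to APIC_ICR.
 */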
static inline void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
        /*
         * Subtle. In the case of the 'never do double writes' workaround
         * we have to lock out interrupts to be safe. As we don't care
         * about the value read we use an atomic rmw access to avoid costly
         * cli/sti. Otherwise we use an even cheaper single atomic write
         * to the APIC.
         */
        unsigned int cfg;

        /* Wait for idle. */
        apic_wait_icr_idle();

        /* No need to touch the target chip field */
        cfg = __prepare_ICR(shortcut, vector);

        /* Send the IPI. The write to APIC_ICR fires this off. */
        apic_write_around(APIC_ICR, cfg);
}
static inline void send_IPI_allbutself(int vector)
{
        /*
         * If there are no other CPUs in the system then we get an
         * APIC send error if we try to broadcast, thus we have to
         * avoid sending IPIs in this case.
         */
        if (smp_num_cpus > 1)
                __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

static inline void send_IPI_all(int vector)
{
        __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void send_IPI_self(int vector)
{
        __send_IPI_shortcut(APIC_DEST_SELF, vector);
}
static inline void send_IPI_mask(int mask, int vector)
{
        unsigned long cfg;
        unsigned long flags;

        __save_flags(flags);
        __cli();

        /* Wait for idle. */
        apic_wait_icr_idle();

        /* prepare target chip field */
        cfg = __prepare_ICR2(mask);
        apic_write_around(APIC_ICR2, cfg);

        /* program the ICR */
        cfg = __prepare_ICR(0, vector);

        /* Send the IPI. The write to APIC_ICR fires this off. */
        apic_write_around(APIC_ICR, cfg);

        __restore_flags(flags);
}
/*
 *      Smarter SMP flushing macros.
 *              c/o Linus Torvalds.
 *
 *      These mean you can really definitely utterly forget about
 *      writing to user space from interrupts. (It's not allowed anyway.)
 *
 *      Optimizations Manfred Spraul <manfred@colorfullife.com>
 */

static volatile unsigned long flush_cpumask;
static struct mm_struct * flush_mm;
static unsigned long flush_va;
static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
#define FLUSH_ALL       0xffffffff
/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead update mm->cpu_vm_mask.
 */
static void inline leave_mm (unsigned long cpu)
{
        if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
                BUG();
        clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
}
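/*
 * Descriptive note: leave_mm() is the lazy-tlb exit path. Once this
 * CPU's bit in active_mm->cpu_vm_mask is cleared, the other CPUs stop
 * targeting it with flush IPIs for that mm (flush_tlb_others() builds
 * its mask from mm->cpu_vm_mask), as described in the ordering notes
 * below.
 */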
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
 *      Stop ipi delivery for the old mm. This is not synchronized with
 *      the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *      for the wrong mm, and in the worst case we perform a superfluous
 *      tlb flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK
 *      Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *      was in lazy tlb mode.
 * 1a3) update cpu_tlbstate[].active_mm
 *      Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask);
 *      Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *      cpu_tlbstate[].active_mm is correct, cpu0 already handles
 *      flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *      Atomically set the bit [other cpus will start sending flush ipis],
 *      and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */
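/*
 * Illustrative sketch only: the 1a) steps above rendered as code.
 * This is simplified; the real switch_mm() lives outside this file
 * and may differ in detail:
 *
 *      clear_bit(cpu, &old_mm->cpu_vm_mask);           1a1: stop flush ipis
 *      cpu_tlbstate[cpu].state = TLBSTATE_OK;          1a2: accept flushes
 *      cpu_tlbstate[cpu].active_mm = new_mm;           1a3: ...for the new mm
 *      set_bit(cpu, &new_mm->cpu_vm_mask);             1a4: others start sending
 *      <load %cr3 with new_mm's page directory>        1a5: change cr3
 */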
/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
asmlinkage void smp_invalidate_interrupt (void)
{
        unsigned long cpu = smp_processor_id();

        if (!test_bit(cpu, &flush_cpumask))
                return;
        /*
         * This was a BUG() but until someone can quote me the
         * line from the intel manual that guarantees an IPI to
         * multiple CPUs is retried _only_ on the erroring CPUs
         * it's staying as a return
         *
         * BUG();
         */

        if (flush_mm == cpu_tlbstate[cpu].active_mm) {
                if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
                        if (flush_va == FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
                } else
                        leave_mm(cpu);
        }
        ack_APIC_irq();
        clear_bit(cpu, &flush_cpumask);
}
static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
                                                unsigned long va)
{
        /*
         * A couple of (to be removed) sanity checks:
         *
         * - we do not send IPIs to not-yet booted CPUs.
         * - current CPU must not be in mask
         * - mask must exist :)
         */
        if (!cpumask)
                BUG();
        if ((cpumask & cpu_online_map) != cpumask)
                BUG();
        if (cpumask & (1 << smp_processor_id()))
                BUG();
        if (!mm)
                BUG();

        /*
         * I'm not happy about this global shared spinlock in the
         * MM hot path, but we'll see how contended it is.
         * Temporarily this turns IRQs off, so that lockups are
         * detected by the NMI watchdog.
         */
        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_va = va;
        atomic_set_mask(cpumask, &flush_cpumask);

        /*
         * We have to send the IPI only to
         * CPUs affected.
         */
        send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

        while (flush_cpumask)
                /* nothing. lockup detection does not belong here */;

        flush_mm = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}
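/*
 * Note on the handshake above: flush_cpumask is the only thing the
 * initiating CPU waits on. Each target clears its own bit at the end
 * of smp_invalidate_interrupt(), so the busy-wait drains exactly when
 * every targeted CPU has either flushed its TLB or left the mm.
 */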
void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());

        local_flush_tlb();
        if (cpu_mask)
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
}
void flush_tlb_mm (struct mm_struct * mm)
{
        unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (cpu_mask)
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
}
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());

        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        leave_mm(smp_processor_id());
        }

        if (cpu_mask)
                flush_tlb_others(cpu_mask, mm, va);
}
static inline void do_flush_tlb_all_local(void)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
                leave_mm(cpu);
}

static void flush_tlb_all_ipi(void *info)
{
        do_flush_tlb_all_local();
}

void flush_tlb_all(void)
{
        smp_call_function(flush_tlb_all_ipi, 0, 1, 1);

        do_flush_tlb_all_local();
}
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        send_IPI_mask(1 << cpu, RESCHEDULE_VECTOR);
}
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
};

static struct call_data_struct * call_data;
/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
                        int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler; you may call it from a bottom half handler.
 */
{
        struct call_data_struct data;
        int cpus = smp_num_cpus - 1;

        if (!cpus)
                return 0;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock_bh(&call_lock);
        call_data = &data;
        /* Send a message to all other CPUs and wait for them to respond */
        send_IPI_allbutself(CALL_FUNCTION_VECTOR);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                barrier();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        barrier();
        spin_unlock_bh(&call_lock);

        return 0;
}
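/*
 * Usage sketch (illustrative, not part of this file's interface): a
 * fast, non-blocking helper can be run on every other CPU and then on
 * the local one - the same pattern flush_tlb_all() above uses:
 *
 *      static void invalidate_foo(void *unused)   // hypothetical helper
 *      {
 *              ...                                // must not block
 *      }
 *
 *      smp_call_function(invalidate_foo, NULL, 1, 1);
 *      invalidate_foo(NULL);   // the calling CPU is not included
 */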
static void stop_this_cpu (void * dummy)
{
        /*
         * Remove this CPU:
         */
        clear_bit(smp_processor_id(), &cpu_online_map);
        __cli();
        disable_local_APIC();
        if (cpu_data[smp_processor_id()].hlt_works_ok)
                for(;;) __asm__("hlt");
        for (;;);
}
/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);
        smp_num_cpus = 1;

        __cli();
        disable_local_APIC();
        __sti();
}
/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
asmlinkage void smp_reschedule_interrupt(void)
{
        ack_APIC_irq();
}
asmlinkage void smp_call_function_interrupt(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        ack_APIC_irq();
        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        (*func)(info);
        if (wait)
                atomic_inc(&call_data->finished);
}