/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *	(c) 2002, 2003 Andi Kleen, SuSE Labs.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mach_apic.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
#include <asm/idle.h>

/*
 *	Smarter SMP flushing macros.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	To avoid global state use 8 different call vectors.
 *	Each CPU uses a specific vector to trigger flushes on other
 *	CPUs. Depending on the received vector the target CPUs look into
 *	the right per-cpu variable for the flush data.
 *
 *	With more than 8 CPUs they are hashed to the 8 available
 *	vectors. The limited global vector space forces us to this right now.
 *	In the future, when interrupts are split into per-CPU domains, this
 *	could be fixed, at the cost of triggering multiple IPIs in some cases.
 */

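/*
 * Illustrative sketch (not part of the original file): the "hash" is a
 * simple modulo, so e.g. with 8 vectors CPU 3 and CPU 11 share slot 3
 * and serialize on that slot's tlbstate_lock:
 *
 *	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
 *	f = &per_cpu(flush_state, sender);
 */
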
union smp_flush_state {
	struct {
		cpumask_t flush_cpumask;
		struct mm_struct *flush_mm;
		unsigned long flush_va;
		spinlock_t tlbstate_lock;
	};
	char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;

/*
 * State is put into the per-CPU data section, but padded
 * to a full cache line because other CPUs can access it and we don't
 * want false sharing in the per-CPU data segment.
 */
static DEFINE_PER_CPU(union smp_flush_state, flush_state);

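/*
 * Padding sketch (illustrative, not in the original): the pad member makes
 * each per-CPU slot occupy at least one full cache line, which a build-time
 * assertion could state explicitly:
 *
 *	BUILD_BUG_ON(sizeof(union smp_flush_state) < SMP_CACHE_BYTES);
 */
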
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
static inline void leave_mm(int cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}

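/*
 * Example (illustrative): a CPU running a kernel thread in lazy TLB mode
 * still has some user mm's page tables loaded in cr3. Rather than flush,
 * leave_mm() clears this CPU from that mm's cpu_vm_mask, so it stops
 * receiving flush IPIs, and switches cr3 to the kernel-only
 * swapper_pg_dir.
 */
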
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	int cpu;
	int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_ax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &per_cpu(flush_state, sender);

	if (!cpu_isset(cpu, f->flush_cpumask))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */

	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	cpu_clear(cpu, f->flush_cpumask);
	add_pda(irq_tlb_count, 1);
}

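/*
 * Decoding sketch (illustrative restatement of the code above): flush
 * vectors are allocated contiguously from INVALIDATE_TLB_VECTOR_START,
 * so a sender using slot 3 raises vector INVALIDATE_TLB_VECTOR_START + 3,
 * and ~orig_ax - INVALIDATE_TLB_VECTOR_START recovers 3 here.
 */
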
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;
	cpumask_t cpumask = *cpumaskp;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}

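/*
 * Protocol summary (illustrative, not in the original): the sender
 * publishes flush_mm/flush_va under tlbstate_lock, ORs the targets into
 * flush_cpumask, sends the IPI, then spins until every receiver clears
 * its own bit in smp_invalidate_interrupt(). The lock also serializes
 * senders that hash to the same slot.
 */
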
int __cpuinit init_smp_flush(void)
{
	int i;

	for_each_cpu_mask(i, cpu_possible_map) {
		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
	}
	return 0;
}
core_initcall(init_smp_flush);

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
	preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}

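/*
 * Note (illustrative): current->mm is NULL for a kernel thread, so the
 * else branch above means this CPU only holds the mm lazily; leave_mm()
 * drops it instead of flushing.
 */
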
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}

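/*
 * Usage sketch (illustrative): the generic mm code calls this after a
 * single user PTE changes, e.g.
 *
 *	flush_tlb_page(vma, address);
 *
 * Only CPUs still set in the mm's cpu_vm_mask receive the IPI.
 */
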
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (read_pda(mmu_state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule.
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;

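/*
 * Handshake sketch (illustrative, not in the original): the caller fills
 * a stack-allocated call_data_struct, points call_data at it, and sends
 * the IPI. Each target increments ->started before calling func and
 * ->finished after, and the caller spins on these counters, so the
 * structure stays valid exactly as long as it is referenced.
 */
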
void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}

/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * of the system defined in the mask.
 */
static int __smp_call_function_mask(cpumask_t mask,
				    void (*func)(void *), void *info,
				    int wait)
{
	struct call_data_struct data;
	cpumask_t allbutself;
	int cpus;

	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);

	cpus_and(mask, mask, allbutself);
	cpus = cpus_weight(mask);

	if (!cpus)
		return 0;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();

	/* Send a message to other CPUs */
	if (cpus_equal(mask, allbutself))
		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (!wait)
		return 0;

	while (atomic_read(&data.finished) != cpus)
		cpu_relax();

	return 0;
}

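/*
 * Ordering note (illustrative, not in the original): the wmb() above
 * makes call_data and the structure it points to visible before the IPI
 * is sent; it pairs with the mb() the receiver executes before bumping
 * ->started in smp_call_function_interrupt().
 */
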
/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on. Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function_mask(cpumask_t mask,
			   void (*func)(void *), void *info,
			   int wait)
{
	int ret;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	spin_lock(&call_lock);
	ret = __smp_call_function_mask(mask, func, info, wait);
	spin_unlock(&call_lock);

	return ret;
}
EXPORT_SYMBOL(smp_call_function_mask);

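/*
 * Usage sketch (illustrative; the callback name is hypothetical): run a
 * fast, non-blocking callback on CPUs 1 and 2 and wait for both:
 *
 *	static void poke_example(void *unused)
 *	{
 *		... per-cpu work, runs in interrupt context ...
 *	}
 *
 *	cpumask_t mask = cpumask_of_cpu(1);
 *	cpu_set(2, mask);
 *	smp_call_function_mask(mask, poke_example, NULL, 1);
 */
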
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Currently unused.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>
 * or has already executed it.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int nonatomic, int wait)
{
	/* prevent preemption and reschedule on another processor */
	int ret, me = get_cpu();

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	if (cpu == me) {
		local_irq_disable();
		func(info);
		local_irq_enable();
		put_cpu();
		return 0;
	}

	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);

	put_cpu();
	return ret;
}
EXPORT_SYMBOL(smp_call_function_single);

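/*
 * Usage sketch (illustrative; read_counter_example is hypothetical):
 *
 *	smp_call_function_single(2, read_counter_example, &result, 0, 1);
 *
 * Note the cpu == me shortcut above runs func locally with interrupts
 * disabled, mimicking the interrupt context remote CPUs see.
 */
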
/*
 * smp_call_function - run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other
 *        CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute func or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 * Actually there are a few legal cases, like panic.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);

static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	disable_local_APIC();
	for (;;)
		halt();
}

void smp_send_stop(void)
{
	int nolock;
	unsigned long flags;

	if (reboot_force)
		return;

	/* Don't deadlock on the call lock in panic */
	nolock = !spin_trylock(&call_lock);
	local_irq_save(flags);
	__smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
	if (!nolock)
		spin_unlock(&call_lock);
	disable_local_APIC();
	local_irq_restore(flags);
}

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
asmlinkage void smp_reschedule_interrupt(void)
{
	ack_APIC_irq();
	add_pda(irq_resched_count, 1);
}

asmlinkage void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	exit_idle();
	irq_enter();
	(*func)(info);
	add_pda(irq_call_count, 1);
	irq_exit();
	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}