arch/ia64/kernel/smp.c
/*
 * SMP Support
 *
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Lots of stuff stolen from arch/alpha/kernel/smp.c
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	IA64-SMP functions. Reorganized
 *	the existing code (on the lines of x86 port).
 * 00/09/11 David Mosberger <davidm@hpl.hp.com>	Do loops_per_jiffy
 *	calibration on each CPU.
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com>	fixed logical processor id
 * 00/03/31 Rohit Seth <rohit.seth@intel.com>	Fixes for Bootstrap Processor
 *	& cpu_online_map now gets done here (instead of setup.c)
 * 99/10/05 davidm	Update to bring it in sync with new command-line processing
 *	scheme.
 * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
 *	smp_call_function_single to resend IPI on timeouts
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/mca.h>

/*
 * Note: alignment of 4 entries/cacheline was empirically determined
 * to be a good tradeoff between hot cachelines & spreading the array
 * across too many cachelines.
 */
static struct local_tlb_flush_counts {
	unsigned int count;
} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];

static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;

/*
 * Structure and data for smp_call_function(). This is designed to minimise static memory
 * requirements. It also looks cleaner.
 */
static __cacheline_aligned DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t started;
	atomic_t finished;
};

static volatile struct call_data_struct *call_data;

#define IPI_CALL_FUNC		0
#define IPI_CPU_STOP		1
#define IPI_KDUMP_CPU_STOP	3

/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);

extern void cpu_halt (void);

void
lock_ipi_calllock(void)
{
	spin_lock_irq(&call_lock);
}

void
unlock_ipi_calllock(void)
{
	spin_unlock_irq(&call_lock);
}

static void
stop_this_cpu (void)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	max_xtp();
	local_irq_disable();
	cpu_halt();
}

void
cpu_die(void)
{
	max_xtp();
	local_irq_disable();
	cpu_halt();
	/* Should never be here */
	BUG();
	for (;;);
}

irqreturn_t
handle_IPI (int irq, void *dev_id)
{
	int this_cpu = get_cpu();
	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
	unsigned long ops;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ffz(~ops);
			ops &= ~(1 << which);

			switch (which) {
			case IPI_CALL_FUNC:
			{
				struct call_data_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				/* release the 'pointer lock' */
				data = (struct call_data_struct *) call_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				mb();
				atomic_inc(&data->started);
				/*
				 * At this point the structure may be gone unless
				 * wait is true.
				 */
				(*func)(info);

				/* Notify the sending CPU that the task is done. */
				mb();
				if (wait)
					atomic_inc(&data->finished);
			}
			break;

			case IPI_CPU_STOP:
				stop_this_cpu();
				break;
#ifdef CONFIG_KEXEC
			case IPI_KDUMP_CPU_STOP:
				unw_init_running(kdump_cpu_freeze, NULL);
				break;
#endif
			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
				break;
			}
		} while (ops);
		mb();	/* Order data access and bit testing. */
	}
	put_cpu();
	return IRQ_HANDLED;
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_single (int dest_cpu, int op)
{
	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
	platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_allbutself (int op)
{
	unsigned int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_all (int op)
{
	int i;

	for_each_online_cpu(i) {
		send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_self (int op)
{
	send_IPI_single(smp_processor_id(), op);
}

#ifdef CONFIG_KEXEC
void
kdump_smp_send_stop(void)
{
	send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
}

void
kdump_smp_send_init(void)
{
	unsigned int cpu, self_cpu;
	self_cpu = smp_processor_id();
	for_each_online_cpu(cpu) {
		if (cpu != self_cpu) {
			if (kdump_status[cpu] == 0)
				platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
		}
	}
}
#endif

/*
 * Called with preemption disabled.
 */
void
smp_send_reschedule (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}

/*
 * Called with preemption disabled.
 */
static void
smp_send_local_flush_tlb (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
}

void
smp_local_flush_tlb(void)
{
	/*
	 * Use atomic ops. Otherwise, the load/increment/store sequence from
	 * a "++" operation can have the line stolen between the load & store.
	 * The overhead of the atomic op is negligible in this case & offers
	 * significant benefit for the brief periods where lots of cpus
	 * are simultaneously flushing TLBs.
	 */
	ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
	local_flush_tlb_all();
}

#define FLUSH_DELAY	5	/* Usec backoff to eliminate excessive cacheline bouncing */

void
smp_flush_tlb_cpumask(cpumask_t xcpumask)
{
	unsigned int *counts = __ia64_per_cpu_var(shadow_flush_counts);
	cpumask_t cpumask = xcpumask;
	int mycpu, cpu, flush_mycpu = 0;

	preempt_disable();
	mycpu = smp_processor_id();

	for_each_cpu_mask(cpu, cpumask)
		counts[cpu] = local_tlb_flush_counts[cpu].count;

	mb();
	for_each_cpu_mask(cpu, cpumask) {
		if (cpu == mycpu)
			flush_mycpu = 1;
		else
			smp_send_local_flush_tlb(cpu);
	}

	if (flush_mycpu)
		smp_local_flush_tlb();

	for_each_cpu_mask(cpu, cpumask)
		while (counts[cpu] == local_tlb_flush_counts[cpu].count)
			udelay(FLUSH_DELAY);

	preempt_enable();
}

void
smp_flush_tlb_all (void)
{
	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
}

void
smp_flush_tlb_mm (struct mm_struct *mm)
{
	preempt_disable();
	/* this happens for the common case of a single-threaded fork(): */
	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1)) {
		local_finish_flush_tlb_mm(mm);
		preempt_enable();
		return;
	}

	preempt_enable();
	/*
	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
	 * have been running in the address space.  It's not clear that this is worth the
	 * trouble though: to avoid races, we have to raise the IPI on the target CPU
	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
	 * rather trivial.
	 */
	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
}

/*
 * Run a function on a specific CPU
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <nonatomic>	Currently unused.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>
 * or is or has executed.
 */
int
smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
			  int wait)
{
	struct call_data_struct data;
	int cpus = 1;
	int me = get_cpu(); /* prevent preemption and reschedule on another processor */

	if (cpuid == me) {
		local_irq_disable();
		func(info);
		local_irq_enable();
		put_cpu();
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);

	call_data = &data;
	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
	send_IPI_single(cpuid, IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	call_data = NULL;

	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);

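/*
 * Illustrative sketch (not part of the original file): one way a caller
 * might use smp_call_function_single() as documented above.  The helper
 * names (example_bump_counter, example_bump_on_cpu) are hypothetical and
 * the block is compiled out with #if 0.
 */
#if 0
static void
example_bump_counter (void *info)
{
	/* Runs on the target CPU from the IPI handler (or inline, with IRQs
	 * disabled, when the target is the calling CPU); must be fast and
	 * non-blocking. */
	atomic_inc((atomic_t *) info);
}

static int
example_bump_on_cpu (int cpu, atomic_t *counter)
{
	/* wait=1: do not return until example_bump_counter has finished. */
	return smp_call_function_single(cpu, example_bump_counter, counter, 0, 1);
}
#endif
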
/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 *  [SUMMARY]	Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <nonatomic>	currently unused.
 *  <wait>	If true, wait (atomically) until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func> or are or have
 * executed.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int
smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus;

	spin_lock(&call_lock);
	cpus = num_online_cpus() - 1;
	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
	send_IPI_allbutself(IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	call_data = NULL;

	spin_unlock(&call_lock);
	return 0;
}
EXPORT_SYMBOL(smp_call_function);

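/*
 * Illustrative sketch (not part of the original file): a minimal caller of
 * smp_call_function() as documented above.  The names example_acks,
 * example_ack and example_ping_all are hypothetical; the block is compiled
 * out with #if 0.  Per the restriction above, such a caller must not run
 * with interrupts disabled or from interrupt/bottom-half context.
 */
#if 0
static atomic_t example_acks = ATOMIC_INIT(0);

static void
example_ack (void *info)
{
	/* Must be fast and non-blocking; runs on every other online CPU. */
	atomic_inc(&example_acks);
}

static void
example_ping_all (void)
{
	/* wait=1: do not return until every other CPU has run example_ack. */
	smp_call_function(example_ack, NULL, 0, 1);
}
#endif
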
/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void
smp_send_stop (void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
}

int
setup_profiling_timer (unsigned int multiplier)
{
	return -EINVAL;
}