/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/tlbflush.h>

#include <asm/io.h>
#include <asm/irq.h>		/* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#undef DEBUG_SMP
#ifdef DEBUG_SMP
static int smp_debug_lvl = 0;
#define smp_debug(lvl, printargs...)	\
		if (lvl >= smp_debug_lvl)	\
			printk(printargs);
#else
#define smp_debug(lvl, ...)
#endif /* DEBUG_SMP */
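
/*
 * Illustrative note (not in the original source): with DEBUG_SMP defined,
 * a call such as
 *
 *	smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
 *
 * expands to a printk() guarded by "100 >= smp_debug_lvl", so the default
 * level of 0 lets every message through; raising smp_debug_lvl above 100
 * silences these sites.
 */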

DEFINE_SPINLOCK(smp_lock);

volatile struct task_struct *smp_init_current_idle_task;

static volatile int cpu_now_booting __read_mostly = 0;	/* track which CPU is booting */

static int parisc_max_cpus __read_mostly = 1;

/* online cpus are ones that we've managed to bring up completely
 * possible cpus are all valid cpus
 * present cpus are all detected cpus
 *
 * On startup we bring up the "possible" cpus. Since we discover
 * CPUs later, we add them as hotplug, so the possible cpu mask is
 * empty in the beginning.
 */

cpumask_t cpu_online_map   __read_mostly = CPU_MASK_NONE;	/* Bitmap of online CPUs */
cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;	/* Bitmap of Present CPUs */

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
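
/*
 * Illustrative example (not in the original source): on a 2-way box the
 * masks evolve roughly as follows:
 *
 *	smp_prepare_boot_cpu():   online = {0},   present = {0}
 *	inventory finds CPU 1:    present = {0,1}
 *	__cpu_up(1) succeeds:     online = {0,1}
 */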

DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};
static volatile struct smp_call_struct *smp_call_function_data;

enum ipi_message_type {
	IPI_NOP = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_START,
	IPI_CPU_STOP,
	IPI_CPU_TEST
};


/********** SMP inter processor interrupt and communication routines */

#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
**    *May* need this "hook" to register IPI handler
**    once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{
#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region

	if (cpu_online(cpuid))
	{
		switch_to_idle_task(current);
	}
	return;
}
#endif


/*
** Yoink this CPU from the runnable list...
*/
static void
halt_processor(void)
{
	/* REVISIT : redirect I/O Interrupts to another CPU? */
	/* REVISIT : does PM *know* this CPU isn't available? */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	for (;;)
		;
}


irqreturn_t
ipi_interrupt(int irq, void *dev_id)
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &cpu_data[this_cpu];
	unsigned long ops;
	unsigned long flags;

	/* Count this now; we may make a call that never returns. */
	p->ipi_count++;

	mb();	/* Order interrupt and bit testing. */

	for (;;) {
		spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
		spin_lock_irqsave(lock, flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(lock, flags);

		mb();	/* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
			unsigned long which = ffz(~ops);

			ops &= ~(1 << which);

			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
				break;

			case IPI_RESCHEDULE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
				/*
				 * Reschedule callback.  Everything to be
				 * done is done by the interrupt return path.
				 */
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
				{
					volatile struct smp_call_struct *data;
					void (*func)(void *info);
					void *info;
					int wait;

					data = smp_call_function_data;
					func = data->func;
					info = data->info;
					wait = data->wait;

					mb();
					atomic_dec((atomic_t *)&data->unstarted_count);

					/* At this point, *data can't
					 * be relied upon.
					 */

					(*func)(info);

					/* Notify the sending CPU that the
					 * task is done.
					 */
					mb();
					if (wait)
						atomic_dec((atomic_t *)&data->unfinished_count);
				}
				break;

			case IPI_CPU_START:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
				halt_processor();
				break;

			case IPI_CPU_TEST:
				smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
				break;

			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				return IRQ_NONE;
			} /* Switch */

			/* let in any pending interrupts */
			local_irq_enable();
			local_irq_disable();
		} /* while (ops) */
	}
	return IRQ_HANDLED;
}
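
/*
 * Dispatch note (illustrative, not in the original source): ffz(~ops)
 * yields the index of the lowest set bit in ops.  E.g. with
 * ops == 0x6 (IPI_RESCHEDULE | IPI_CALL_FUNC pending):
 *
 *	which = ffz(~0x6) = 1;		// IPI_RESCHEDULE handled first
 *	ops &= ~(1 << 1);		// ops == 0x4
 *	which = ffz(~0x4) = 2;		// then IPI_CALL_FUNC
 */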


static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &cpu_data[cpu];
	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	p->pending_ipi |= 1 << op;
	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa);
	spin_unlock_irqrestore(lock, flags);
}
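
/*
 * Send-side sketch (illustrative, not in the original source): posting an
 * IPI is a two-step mailbox protocol.  The bit in p->pending_ipi is the
 * message; the gsc_writel() to the target CPU's hpa is the doorbell that
 * raises the external interrupt handled by ipi_interrupt().  Several ops
 * posted before the target takes the interrupt are coalesced into one
 * invocation, e.g.:
 *
 *	ipi_send(1, IPI_NOP);
 *	ipi_send(1, IPI_RESCHEDULE);	// CPU 1 may see both bits at once
 */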

static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	if (dest_cpu == NO_PROC_ID) {
		BUG();
		return;
	}

	ipi_send(dest_cpu, op);
}

static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}


inline void
smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }

static inline void
smp_send_start(void)	{ send_IPI_allbutself(IPI_CPU_START); }

void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }

void
smp_send_all_nop(void)
{
	send_IPI_allbutself(IPI_NOP);
}


/*
 * Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have executed it.
 */
int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	struct smp_call_struct data;
	unsigned long timeout;
	static DEFINE_SPINLOCK(lock);
	int retries = 0;

	if (num_online_cpus() < 2)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* can also deadlock if IPIs are disabled */
	WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);

	data.func = func;
	data.info = info;
	data.wait = wait;
	atomic_set(&data.unstarted_count, num_online_cpus() - 1);
	atomic_set(&data.unfinished_count, num_online_cpus() - 1);

	if (retry) {
		spin_lock (&lock);
		while (smp_call_function_data != 0)
			barrier();
	}
	else {
		spin_lock (&lock);
		if (smp_call_function_data) {
			spin_unlock (&lock);
			return -EBUSY;
		}
	}

	smp_call_function_data = &data;
	spin_unlock (&lock);

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(IPI_CALL_FUNC);

retry:
	/* Wait for response */
	timeout = jiffies + HZ;
	while ( (atomic_read (&data.unstarted_count) > 0) &&
		time_before (jiffies, timeout) )
		barrier ();

	if (atomic_read (&data.unstarted_count) > 0) {
		printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n",
		      smp_processor_id(), ++retries);
		goto retry;
	}
	/* We either got one or timed out. Release the lock */

	mb();
	smp_call_function_data = NULL;

	while (wait && atomic_read (&data.unfinished_count) > 0)
		barrier ();

	return 0;
}

EXPORT_SYMBOL(smp_call_function);
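
/*
 * Usage sketch (illustrative; dump_local_state is a hypothetical helper,
 * not part of this file):
 *
 *	static void dump_local_state(void *unused)
 *	{
 *		printk(KERN_INFO "CPU%d checking in\n", smp_processor_id());
 *	}
 *
 *	smp_call_function(dump_local_state, NULL, 0, 1);  // no retry, wait
 *
 * The callback runs from interrupt context on the remote CPUs, so it must
 * be fast, must not sleep, and must not take locks the caller may hold.
 */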

/*
 * Flush all other CPUs' TLBs and then mine.  Do this with on_each_cpu()
 * as we want to ensure all TLBs are flushed before proceeding.
 */

void
smp_flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
}

/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void __init
smp_cpu_init(int cpunum)
{
	extern int init_per_cpu(int);	/* arch/parisc/kernel/processor.c */
	extern void init_IRQ(void);	/* arch/parisc/kernel/irq.c */
	extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */

	/* Set modes and Enable floating point coprocessor */
	(void) init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Well, support 2.4 linux scheme as well. */
	if (cpu_test_and_set(cpunum, cpu_online_map))
	{
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	init_IRQ();	/* make sure no IRQs are enabled or pending */
	start_cpu_itimer();
}


/*
 * Slaves start using C here. Indirectly called from smp_slave_stext.
 * Do what start_kernel() and main() do for boot strap processor (aka monarch)
 */
void __init smp_callin(void)
{
	int slave_id = cpu_now_booting;
#if 0
	void *istack;
#endif

	smp_cpu_init(slave_id);
	preempt_disable();

#if 0	/* NOT WORKING YET - see entry.S */
	istack = (void *)__get_free_pages(GFP_KERNEL, ISTACK_ORDER);
	if (istack == NULL) {
		printk(KERN_CRIT "Failed to allocate interrupt stack for cpu %d\n", slave_id);
		BUG();
	}
	mtctl(istack, 31);
#endif

	flush_cache_all_local();	/* start with known state */
	flush_tlb_all_local(NULL);

	local_irq_enable();	/* Interrupts have been off until now */

	cpu_idle();	/* Wait for timer to schedule some work */

	/* NOTREACHED */
	panic("smp_callin() AAAAaaaaahhhh....\n");
}

/*
 * Bring one cpu online.
 */
int __cpuinit smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	long timeout;

	/*
	 * Create an idle task for this CPU.  Note the address we'd give
	 * to kernel_thread is irrelevant -- it's going to start
	 * where OS_BOOT_RENDEVZ vector in SAL says to start.  But
	 * this gets all the other task-y sort of data structures set
	 * up like we wish.   We need to pull the just created idle task
	 * off the run queue and stuff it into the init_tasks[] array.
	 */

	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	task_thread_info(idle)->cpu = cpuid;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[],cpu_data[])
	*/
	cpu_now_booting = cpuid;

	/*
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa);

	/*
	** This gets PDC to release the CPU from a very tight loop.
	**
	** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
	** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
	** is executed after receiving the rendezvous signal (an interrupt to
	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
	** contents of memory are valid."
	*/
	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa);
	mb();

	/*
	 * OK, wait a bit for that CPU to finish staggering about.
	 * Slave will set a bit when it reaches smp_cpu_init().
	 * Once the "monarch CPU" sees the bit change, it can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			smp_init_current_idle_task = NULL;
			goto alive;
		}
		udelay(100);
		barrier();
	}

	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
	smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
		cpuid, timeout * 100);
	return 0;
}
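
/*
 * Boot handshake summary (illustrative, derived from the code above):
 *
 *	monarch				slave (spinning in PDC)
 *	-------				-----------------------
 *	cpu_now_booting = cpuid
 *	smp_init_current_idle_task = idle
 *	gsc_writel(..., hpa)  ------>	released at OS_RENDEZ
 *					smp_slave_stext -> smp_callin()
 *					smp_cpu_init() sets cpu_online_map bit
 *	polls cpu_online(cpuid)
 *	for up to 10000 * 100us = ~1s
 */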

void __devinit smp_prepare_boot_cpu(void)
{
	int bootstrap_processor = cpu_data[0].cpuid;	/* CPU ID of BSP */

	/* Setup BSP mappings */
	printk("SMP: bootstrap CPU ID is %d\n", bootstrap_processor);

	cpu_set(bootstrap_processor, cpu_online_map);
	cpu_set(bootstrap_processor, cpu_present_map);
}


/*
** inventory.c:do_inventory() hasn't yet been run and thus we
** don't 'discover' the additional CPUs until later.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	cpus_clear(cpu_present_map);
	cpu_set(0, cpu_present_map);

	parisc_max_cpus = max_cpus;
	if (!max_cpus)
		printk(KERN_INFO "SMP mode deactivated.\n");
}


void smp_cpus_done(unsigned int cpu_max)
{
	return;
}


int __cpuinit __cpu_up(unsigned int cpu)
{
	if (cpu != 0 && cpu < parisc_max_cpus)
		smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}


#ifdef CONFIG_PROC_FS
int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
#endif