/*
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>

#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/tlbflush.h>

#include <asm/io.h>		/* for gsc_writel() */
#include <asm/irq.h>		/* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#ifdef DEBUG_SMP
static int smp_debug_lvl = 0;
#define smp_debug(lvl, printargs...)		\
		if (lvl >= smp_debug_lvl)	\
			printk(printargs);
#else
#define smp_debug(lvl, ...)
#endif /* DEBUG_SMP */
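
/*
 * Usage sketch (illustrative, mirroring calls made later in this file):
 *
 *	smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
 *
 * With DEBUG_SMP defined, the message is printed whenever the call's
 * level is at or above smp_debug_lvl; otherwise smp_debug() expands to
 * nothing and costs nothing at runtime.
 */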
DEFINE_SPINLOCK(smp_lock);

volatile struct task_struct *smp_init_current_idle_task;

static volatile int cpu_now_booting __read_mostly = 0;	/* track which CPU is booting */

static int parisc_max_cpus __read_mostly = 1;
/* online cpus are ones that we've managed to bring up completely
 * possible cpus are all valid CPUs
 * present cpus are all detected CPUs
 *
 * On startup we bring up the "possible" cpus. Since we discover
 * CPUs later, we add them as hotplug, so the possible cpu mask is
 * empty in the beginning.
 */
cpumask_t cpu_online_map   __read_mostly = CPU_MASK_NONE;	/* Bitmap of online CPUs */
cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;	/* Bitmap of possible CPUs */

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
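
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * generic cpumask helpers operate on these maps, e.g. cpu_online(cpu)
 * tests a bit in cpu_online_map, and for_each_online_cpu(i) iterates
 * over its set bits, as send_IPI_allbutself() does further down.
 */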
DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};
static volatile struct smp_call_struct *smp_call_function_data;
enum ipi_message_type {
	IPI_NOP = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_START,
	IPI_CPU_STOP,
	IPI_CPU_TEST
};
/********** SMP inter processor interrupt and communication routines */
#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
**    *May* need this "hook" to register IPI handler
**    once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{
#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region

	if (cpu_online(cpuid))
	{
		switch_to_idle_task(current);
	}

	return;
}
#endif
/*
** Yoink this CPU from the runnable list...
*/
static void
halt_processor(void)
{
	/* REVISIT : redirect I/O Interrupts to another CPU? */
	/* REVISIT : does PM *know* this CPU isn't available? */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	for (;;)
		;
}
irqreturn_t
ipi_interrupt(int irq, void *dev_id)
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &cpu_data[this_cpu];
	unsigned long ops;
	unsigned long flags;

	/* Count this now; we may make a call that never returns. */
	p->ipi_count++;

	mb();	/* Order interrupt and bit testing. */

	for (;;) {
		spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
		spin_lock_irqsave(lock, flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(lock, flags);

		mb();	/* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
			unsigned long which = ffz(~ops);

			ops &= ~(1 << which);

			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
				break;

			case IPI_RESCHEDULE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
				/*
				 * Reschedule callback. Everything to be
				 * done is done by the interrupt return path.
				 */
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
				{
					volatile struct smp_call_struct *data;
					void (*func)(void *info);
					void *info;
					int wait;

					data = smp_call_function_data;
					func = data->func;
					info = data->info;
					wait = data->wait;

					mb();
					atomic_dec ((atomic_t *)&data->unstarted_count);

					/* At this point, *data can't
					 * be relied upon.
					 */

					(*func)(info);

					/* Notify the sending CPU that the
					 * task is done.
					 */
					mb();
					if (wait)
						atomic_dec ((atomic_t *)&data->unfinished_count);
				}
				break;

			case IPI_CPU_START:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
				halt_processor();
				break;

			case IPI_CPU_TEST:
				smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
				break;

			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				return IRQ_NONE;
			}

			/* let in any pending interrupts */
			local_irq_enable();
			local_irq_disable();
		}
	}

	return IRQ_HANDLED;
}
static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &cpu_data[cpu];
	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	p->pending_ipi |= 1 << op;
	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa);
	spin_unlock_irqrestore(lock, flags);
}
static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	if (dest_cpu == NO_PROC_ID) {
		BUG();
		return;
	}

	ipi_send(dest_cpu, op);
}
static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}
inline void
smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }

static inline void
smp_send_start(void)	{ send_IPI_allbutself(IPI_CPU_START); }

void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }

void
smp_send_all_nop(void)
{
	send_IPI_allbutself(IPI_NOP);
}
/**
 * Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have executed it.
 */
int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	struct smp_call_struct data;
	unsigned long timeout;
	static DEFINE_SPINLOCK(lock);
	int retries = 0;

	if (num_online_cpus() < 2)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* can also deadlock if IPIs are disabled */
	WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);

	data.func = func;
	data.info = info;
	data.wait = wait;
	atomic_set(&data.unstarted_count, num_online_cpus() - 1);
	atomic_set(&data.unfinished_count, num_online_cpus() - 1);

	if (retry) {
		spin_lock (&lock);
		while (smp_call_function_data != 0)
			barrier();
	}
	else {
		spin_lock (&lock);
		if (smp_call_function_data) {
			spin_unlock (&lock);
			return -EBUSY;
		}
	}

	smp_call_function_data = &data;
	spin_unlock (&lock);

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(IPI_CALL_FUNC);

retry:
	/* Wait for response */
	timeout = jiffies + HZ;
	while ( (atomic_read (&data.unstarted_count) > 0) &&
		time_before (jiffies, timeout) )
		barrier ();

	if (atomic_read (&data.unstarted_count) > 0) {
		printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n",
		      smp_processor_id(), ++retries);
		goto retry;
	}
	/* We either got one or timed out. Release the lock */

	mb();
	smp_call_function_data = NULL;

	while (wait && atomic_read (&data.unfinished_count) > 0)
		barrier ();

	return 0;
}

EXPORT_SYMBOL(smp_call_function);
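
/*
 * Usage sketch (illustrative only; my_ipi_func is hypothetical and not
 * part of this file): run a fast, non-blocking callback on every other
 * online CPU, retrying until the slot is free and waiting for all CPUs
 * to finish:
 *
 *	static void my_ipi_func(void *info)
 *	{
 *		... fast, non-blocking work; runs in interrupt context ...
 *	}
 *
 *	smp_call_function(my_ipi_func, NULL, 1, 1);
 */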
/*
 * Flush all other CPUs' TLBs and then our own. Do this with on_each_cpu()
 * as we want to ensure all TLBs are flushed before proceeding.
 */
void
smp_flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
}
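
/*
 * Note (a rough sketch of the generic 2.6-era helper, assumed rather
 * than taken from this file): on_each_cpu() is the smp_call_function()
 * variant that also runs the callback locally, approximately:
 *
 *	preempt_disable();
 *	smp_call_function(flush_tlb_all_local, NULL, 1, 1);
 *	local_irq_disable();
 *	flush_tlb_all_local(NULL);
 *	local_irq_enable();
 *	preempt_enable();
 *
 * i.e. the remote flushes are triggered first, then the local TLB is
 * flushed as well before the caller proceeds.
 */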
/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void __init
smp_cpu_init(int cpunum)
{
	extern int init_per_cpu(int);  /* arch/parisc/kernel/processor.c */
	extern void init_IRQ(void);    /* arch/parisc/kernel/irq.c */
	extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */

	/* Set modes and Enable floating point coprocessor */
	(void) init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Well, support 2.4 linux scheme as well. */
	if (cpu_test_and_set(cpunum, cpu_online_map))
	{
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	init_IRQ();	/* make sure no IRQ's are enabled or pending */
	start_cpu_itimer();
}
/*
 * Slaves start using C here. Indirectly called from smp_slave_stext.
 * Do what start_kernel() and main() do for the boot strap processor (aka monarch).
 */
void __init smp_callin(void)
{
	int slave_id = cpu_now_booting;
#if 0
	void *istack;
#endif

	smp_cpu_init(slave_id);
	preempt_disable();

#if 0	/* NOT WORKING YET - see entry.S */
	istack = (void *)__get_free_pages(GFP_KERNEL, ISTACK_ORDER);
	if (istack == NULL) {
		printk(KERN_CRIT "Failed to allocate interrupt stack for cpu %d\n", slave_id);
		BUG();
	}
	mtctl(istack, 31);
#endif

	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	local_irq_enable();  /* Interrupts have been off until now */

	cpu_idle();      /* Wait for timer to schedule some work */

	/* NOTREACHED */
	panic("smp_callin() AAAAaaaaahhhh....\n");
}
/*
 * Bring one cpu online.
 */
int __init smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	long timeout;

	/*
	 * Create an idle task for this CPU.  Note the address we'd give
	 * to kernel_thread is irrelevant -- it's going to start
	 * where OS_BOOT_RENDEVZ vector in SAL says to start.  But
	 * this gets all the other task-y sort of data structures set
	 * up like we wish.  We need to pull the just created idle task
	 * off the run queue and stuff it into the init_tasks[] array.
	 */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	task_thread_info(idle)->cpu = cpuid;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[],cpu_data[])
	*/
	cpu_now_booting = cpuid;

	/*
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa);

	/*
	** This gets PDC to release the CPU from a very tight loop.
	**
	** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
	** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
	** is executed after receiving the rendezvous signal (an interrupt to
	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
	** contents of memory are valid."
	*/
	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa);
	mb();

	/*
	 * OK, wait a bit for that CPU to finish staggering about.
	 * Slave will set a bit when it reaches smp_cpu_init().
	 * Once the "monarch CPU" sees the bit change, it can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			smp_init_current_idle_task = NULL;
			goto alive;
		}
		udelay(100);
		barrier();
	}

	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
	smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
		cpuid, timeout * 100);
	return 0;
}
void __devinit smp_prepare_boot_cpu(void)
{
	int bootstrap_processor = cpu_data[0].cpuid;	/* CPU ID of BSP */

	/* Setup BSP mappings */
	printk("SMP: bootstrap CPU ID is %d\n", bootstrap_processor);

	cpu_set(bootstrap_processor, cpu_online_map);
	cpu_set(bootstrap_processor, cpu_present_map);
}
/*
** inventory.c:do_inventory() hasn't yet been run and thus we
** don't 'discover' the additional CPUs until later.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	cpus_clear(cpu_present_map);
	cpu_set(0, cpu_present_map);

	parisc_max_cpus = max_cpus;
	if (!max_cpus)
		printk(KERN_INFO "SMP mode deactivated.\n");
}
void smp_cpus_done(unsigned int cpu_max)
{
	return;
}
int __cpuinit __cpu_up(unsigned int cpu)
{
	if (cpu != 0 && cpu < parisc_max_cpus)
		smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}
#ifdef CONFIG_PROC_FS
int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
#endif
)