/*
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
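
/*
 * Illustrative sketch, not part of the original file: with the maps
 * exported, other kernel code can walk the hardware threads sharing a
 * core with a given cpu, e.g.
 *
 *	int t;
 *	for_each_cpu_mask(t, per_cpu(cpu_sibling_map, cpu))
 *		pr_debug("cpu %d shares a core with cpu %d\n", cpu, t);
 */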

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
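
/*
 * Hypothetical sketch of how a platform might provide these operations;
 * the fields named below are the ones this file actually calls, but the
 * "myplat_*" helpers are made up for illustration:
 *
 *	static struct smp_ops_t myplat_smp_ops = {
 *		.message_pass	= myplat_message_pass,
 *		.probe		= myplat_probe,
 *		.kick_cpu	= smp_generic_kick_cpu,
 *		.setup_cpu	= myplat_setup_cpu,
 *	};
 *
 * Platform setup code then points smp_ops at such a structure.
 */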

static volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

#ifdef CONFIG_PPC64
void __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	paca[nr].cpu_start = 1;
	smp_mb();
}
#endif
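
/*
 * For context, a sketch of the other side of this handshake (the real
 * loop lives in low-level startup assembly, so this is illustrative
 * only): the secondary effectively does
 *
 *	while (!paca[nr].cpu_start)
 *		barrier();
 *
 * before falling through into the secondary start path, so the single
 * store plus barrier above is what releases it.
 */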

void smp_message_recv(int msg)
{
	switch (msg) {
	case PPC_MSG_CALL_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* we notice need_resched on exit */
		break;
	case PPC_MSG_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case PPC_MSG_DEBUGGER_BREAK:
		if (crash_ipi_function_ptr) {
			crash_ipi_function_ptr(get_irq_regs());
			break;
		}
#ifdef CONFIG_DEBUGGER
		debugger_ipi(get_irq_regs());
		break;
#endif /* CONFIG_DEBUGGER */
		/* FALLTHROUGH */
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	/* we just need the return path side effect of checking need_resched */
	return IRQ_HANDLED;
}

static irqreturn_t call_function_single_action(int irq, void *data)
{
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
	return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK)
		return -EINVAL;
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK)
		return 1;
#endif
	err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
			  smp_ipi_name[msg], 0);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}
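
/*
 * Example usage, with a hypothetical my_ipi_virq[] array for
 * illustration: a controller with one interrupt per message could
 * register all four handlers as
 *
 *	int msg;
 *	for (msg = 0; msg <= PPC_MSG_DEBUGGER_BREAK; msg++)
 *		smp_request_message_ipi(my_ipi_virq[msg], msg);
 *
 * where my_ipi_virq[] holds the virqs the platform mapped for its IPIs.
 */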

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi(cpumask_t mask)
{
	unsigned int cpu;

	for_each_cpu_mask(cpu, mask)
		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
	if (likely(smp_ops))
		smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback && smp_ops) {
		mb();
		smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
	}
}
#endif

static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/* create a process for the processor */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
#ifdef CONFIG_PPC64
	paca[cpu].__current = p;
	paca[cpu].kstack = (unsigned long) task_thread_info(p)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	current_set[cpu] = task_thread_info(p);
	task_thread_info(p)->cpu = cpu;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	if (smp_ops)
		max_cpus = smp_ops->probe();
	else
		max_cpus = 1;

	smp_space_timers(max_cpus);

	for_each_possible_cpu(cpu)
		if (cpu != boot_cpuid)
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);

	cpu_set(boot_cpuid, cpu_online_map);
	cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
	cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
	fixup_irqs(cpu_online_map);
#endif
	return 0;
}

int generic_cpu_enable(unsigned int cpu)
{
	/* Do the normal bootup if we haven't
	 * already bootstrapped. */
	if (system_state != SYSTEM_RUNNING)
		return -ENOSYS;

	/* get the target out of its holding state */
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	smp_wmb();

	while (!cpu_online(cpu))
		cpu_relax();

#ifdef CONFIG_PPC64
	fixup_irqs(cpu_online_map);
	/* counter the irq disable in fixup_irqs */
	local_irq_enable();
#endif
	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
	cpu_set(cpu, cpu_online_map);
	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
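
/*
 * Summary of the holding-pattern handshake implemented above, added for
 * clarity:
 *
 *	requesting CPU				dying CPU
 *	--------------				---------
 *						generic_mach_cpu_die():
 *						  cpu_state = CPU_DEAD
 *						  spin until CPU_UP_PREPARE
 *	generic_cpu_die():
 *	  poll up to ~10s for CPU_DEAD
 *	generic_cpu_enable():
 *	  cpu_state = CPU_UP_PREPARE
 *						  re-enter cpu_online_map
 */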

static int __devinit cpu_enable(unsigned int cpu)
{
	if (smp_ops && smp_ops->cpu_enable)
		return smp_ops->cpu_enable(cpu);

	return -ENOSYS;
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	int c;

	secondary_ti = current_set[cpu];
	if (!cpu_enable(cpu))
		return 0;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	/* Make sure callin-map entry is 0 (it can be left over from a
	 * CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	smp_ops->kick_cpu(cpu);

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 25; c && !cpu_callin_map[cpu]; c--)
			msleep(200);
#endif

	if (!cpu_callin_map[cpu]) {
		printk("Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	printk("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}
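
/*
 * Bringup in brief, summarizing this file's code: __cpu_up() clears
 * cpu_callin_map[cpu], issues smp_mb() and kicks the CPU; the secondary
 * runs start_secondary() below, sets cpu_callin_map[cpu] = 1, and
 * finally marks itself in cpu_online_map, which is what the wait loop
 * above polls for.
 */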

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const int *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = *reg;
out:
	of_node_put(np);
	return id;
}

/* Must be called when no change can occur to cpu_present_map,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	if (system_state > SYSTEM_BOOTING)
		snapshot_timebase();

	secondary_cpu_time_init();

	notify_cpu_starting(cpu);
	cpu_set(cpu, cpu_online_map);
	/* Update sibling maps */
	base = cpu_first_thread_in_core(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i))
			continue;
		cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
		cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpu_set(cpu, per_cpu(cpu_core_map, base + i));
		cpu_set(base + i, per_cpu(cpu_core_map, cpu));
	}
	l2_cache = cpu_to_l2cache(cpu);
	for_each_online_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			cpu_set(i, per_cpu(cpu_core_map, cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);

	local_irq_enable();

	cpu_idle();

	return 0;
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime,
	 * so we pin ourselves down to CPU 0 for a short while.
	 */
	old_mask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

	if (smp_ops)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed(current, old_mask);

	snapshot_timebases();

	dump_numa_cpu_topology();
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	struct device_node *l2_cache;
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_in_core(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
		cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
		cpu_clear(cpu, per_cpu(cpu_core_map, base + i));
		cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
	}

	l2_cache = cpu_to_l2cache(cpu);
	for_each_present_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpu_clear(cpu, per_cpu(cpu_core_map, i));
			cpu_clear(i, per_cpu(cpu_core_map, cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}
#endif /* CONFIG_HOTPLUG_CPU */