/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/naca.h>
#include <asm/paca.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvCall.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/time.h>
#include <asm/ppcdebug.h>
#include "open_pic.h"
#include <asm/machdep.h>
#include <asm/xics.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/rtas.h>
#include <asm/plpar_wrappers.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

int smp_threads_ready;
unsigned long cache_decay_ticks;

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
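
/* Platform-specific SMP callbacks (message_pass/probe/kick_cpu/setup_cpu),
 * installed very early by smp_init_iSeries()/smp_init_pSeries() below. */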
struct smp_ops_t *smp_ops;
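
/* One slot per cpu, set non-zero by a secondary in start_secondary() and
 * polled by __cpu_up() to see whether the kicked cpu actually came up. */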
static volatile unsigned int cpu_callin_map[NR_CPUS];

extern unsigned char stab_array[];

extern int cpu_idle(void *unused);
void smp_call_function_interrupt(void);
extern long register_vpa(unsigned long flags, unsigned long proc,
                         unsigned long vpa);

int smt_enabled_at_boot = 1;

/* Low level assembly function used to backup CPU 0 state */
extern void __save_cpu_setup(void);

extern void pseries_secondary_smp_init(unsigned long);

#ifdef CONFIG_PPC_ISERIES
static unsigned long iSeries_smp_message[NR_CPUS];
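
/* IPI receive path on iSeries: each pending message type is a bit in
 * iSeries_smp_message[cpu], set by the sender and cleared here. */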
void iSeries_smp_message_recv( struct pt_regs * regs )
{
        int cpu = smp_processor_id();
        int msg;

        if ( num_online_cpus() < 2 )
                return;

        for ( msg = 0; msg < 4; ++msg )
                if ( test_and_clear_bit( msg, &iSeries_smp_message[cpu] ) )
                        smp_message_recv( msg, regs );
}

static inline void smp_iSeries_do_message(int cpu, int msg)
{
        set_bit(msg, &iSeries_smp_message[cpu]);
        HvCall_sendIPI(&(paca[cpu]));
}

static void smp_iSeries_message_pass(int target, int msg)
{
        int i;

        if (target < NR_CPUS)
                smp_iSeries_do_message(target, msg);
        else {
                for_each_online_cpu(i) {
                        if (target == MSG_ALL_BUT_SELF
                            && i == smp_processor_id())
                                continue;
                        smp_iSeries_do_message(i, msg);
                }
        }
}

static int smp_iSeries_numProcs(void)
{
        unsigned np, i;

        np = 0;
        for (i=0; i < NR_CPUS; ++i) {
                if (paca[i].lppaca.xDynProcStatus < 2) {
                        cpu_set(i, cpu_possible_map);
                        cpu_set(i, cpu_present_map);
                        ++np;
                }
        }
        return np;
}

static int smp_iSeries_probe(void)
{
        unsigned i;
        unsigned np = 0;

        for (i=0; i < NR_CPUS; ++i) {
                if (paca[i].lppaca.xDynProcStatus < 2) {
                        /*paca[i].active = 1;*/
                        ++np;
                }
        }

        return np;
}

static void smp_iSeries_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /* Verify that our partition has a processor nr */
        if (paca[nr].lppaca.xDynProcStatus >= 2)
                return;

        /* The processor is currently spinning, waiting
         * for the cpu_start field to become non-zero.
         * After we set cpu_start, the processor will
         * continue on to secondary_start in iSeries_head.S.
         */
        paca[nr].cpu_start = 1;
}

static void __devinit smp_iSeries_setup_cpu(int nr)
{
}

static struct smp_ops_t iSeries_smp_ops = {
        .message_pass = smp_iSeries_message_pass,
        .probe        = smp_iSeries_probe,
        .kick_cpu     = smp_iSeries_kick_cpu,
        .setup_cpu    = smp_iSeries_setup_cpu,
};

/* This is called very early. */
void __init smp_init_iSeries(void)
{
        smp_ops = &iSeries_smp_ops;
        systemcfg->processorCount = smp_iSeries_numProcs();
}
#endif

#ifdef CONFIG_PPC_MULTIPLATFORM
void smp_openpic_message_pass(int target, int msg)
{
        /* make sure we're sending something that translates to an IPI */
        if ( msg > 0x3 ){
                printk("SMP %d: smp_message_pass: unknown msg %d\n",
                       smp_processor_id(), msg);
                return;
        }
        switch ( target )
        {
        case MSG_ALL:
                openpic_cause_IPI(msg, 0xffffffff);
                break;
        case MSG_ALL_BUT_SELF:
                openpic_cause_IPI(msg,
                                  0xffffffff & ~(1 << smp_processor_id()));
                break;
        default:
                openpic_cause_IPI(msg, 1<<target);
                break;
        }
}

static int __init smp_openpic_probe(void)
{
        int nr_cpus;

        nr_cpus = cpus_weight(cpu_possible_map);

        if (nr_cpus > 1)
                openpic_request_IPIs();

        return nr_cpus;
}

static void __devinit smp_openpic_setup_cpu(int cpu)
{
        do_openpic_setup_cpu();
}

#endif /* CONFIG_PPC_MULTIPLATFORM */

#ifdef CONFIG_PPC_PSERIES

/* Get state of physical CPU.
 * Return codes:
 *      0       - The processor is in the RTAS stopped state
 *      1       - stop-self is in progress
 *      2       - The processor is not in the RTAS stopped state
 *      -1      - Hardware Error
 *      -2      - Hardware Busy, Try again later.
 */
int query_cpu_stopped(unsigned int pcpu)
{
        int cpu_status;
        int status, qcss_tok;

        DBG(" -> query_cpu_stopped(%d)\n", pcpu);
        qcss_tok = rtas_token("query-cpu-stopped-state");
        if (qcss_tok == RTAS_UNKNOWN_SERVICE)
                return -1;
        status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
        if (status != 0) {
                printk(KERN_ERR
                       "RTAS query-cpu-stopped-state failed: %i\n", status);
                return status;
        }

        DBG(" <- query_cpu_stopped(), status: %d\n", cpu_status);

        return cpu_status;
}

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
        /* FIXME: go put this in a header somewhere */
        extern void xics_migrate_irqs_away(void);

        systemcfg->processorCount--;

        /* fix boot_cpuid here */
        if (smp_processor_id() == boot_cpuid)
                boot_cpuid = any_online_cpu(cpu_online_map);

        /* FIXME: abstract this to not be platform specific later on */
        xics_migrate_irqs_away();
        return 0;
}

void __cpu_die(unsigned int cpu)
{
        int tries;
        int cpu_status;
        unsigned int pcpu = get_hard_smp_processor_id(cpu);

        for (tries = 0; tries < 25; tries++) {
                cpu_status = query_cpu_stopped(pcpu);
                if (cpu_status == 0 || cpu_status == -1)
                        break;
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(HZ/5);
        }
        if (cpu_status != 0) {
                printk("Querying DEAD? cpu %i (%i) shows %i\n",
                       cpu, pcpu, cpu_status);
        }

        /* Isolation and deallocation are definitely done by
         * drslot_chrp_cpu.  If they were not they would be
         * done here.  Change isolate state to Isolate and
         * change allocation-state to Unusable.
         */
        paca[cpu].cpu_start = 0;

        /* So we can recognize if it fails to come up next time. */
        cpu_callin_map[cpu] = 0;
}

/* Kill this cpu */
void cpu_die(void)
{
        local_irq_disable();
        /* Some hardware requires clearing the CPPR, while other hardware
         * does not; it is safe either way.
         */
        pSeriesLP_cppr_info(0, 0);
        rtas_stop_self();
        /* Should never get here... */
        BUG();
        for(;;);
}

/* Search all cpu device nodes for an offline logical cpu.  If a
 * device node has an "ibm,my-drc-index" property (meaning this is an
 * LPAR), paranoid-check whether we own the cpu.  For each "thread"
 * of a cpu, if it is offline and has the same hw index as before,
 * grab that in preference.
 */
static unsigned int find_physical_cpu_to_start(unsigned int old_hwindex)
{
        struct device_node *np = NULL;
        unsigned int best = -1U;

        while ((np = of_find_node_by_type(np, "cpu"))) {
                int nr_threads, len;
                u32 *index = (u32 *)get_property(np, "ibm,my-drc-index", NULL);
                u32 *tid = (u32 *)
                        get_property(np, "ibm,ppc-interrupt-server#s", &len);

                if (!tid)
                        tid = (u32 *)get_property(np, "reg", &len);

                if (!tid)
                        continue;

                /* If there is a drc-index, make sure that we own
                 * the cpu.
                 */
                if (index) {
                        int state;
                        int rc = rtas_get_sensor(9003, *index, &state);
                        if (rc != 0 || state != 1)
                                continue;
                }

                nr_threads = len / sizeof(u32);

                while (nr_threads--) {
                        if (0 == query_cpu_stopped(tid[nr_threads])) {
                                best = tid[nr_threads];
                                if (best == old_hwindex)
                                        goto out;
                        }
                }
        }
out:
        of_node_put(np);
        return best;
}

/*
 * smp_startup_cpu() - start the given cpu
 *
 * At boot time, there is nothing to do.  At run-time, call RTAS with
 * the appropriate start location, if the cpu is in the RTAS stopped
 * state.
 *
 * Returns:
 *      0       - failure
 *      1       - success
 */
static inline int __devinit smp_startup_cpu(unsigned int lcpu)
{
        int status;
        unsigned long start_here = __pa((u32)*((unsigned long *)
                                               pseries_secondary_smp_init));
        unsigned int pcpu;

        /* At boot time the cpus are already spinning in hold
         * loops, so nothing to do. */
        if (system_state < SYSTEM_RUNNING)
                return 1;

        pcpu = find_physical_cpu_to_start(get_hard_smp_processor_id(lcpu));
        if (pcpu == -1U) {
                printk(KERN_INFO "No more cpus available, failing\n");
                return 0;
        }

        /* Fixup atomic count: it exited inside IRQ handler. */
        paca[lcpu].__current->thread_info->preempt_count = 0;

        /* At boot this is done in prom.c. */
        paca[lcpu].hw_cpu_id = pcpu;

        status = rtas_call(rtas_token("start-cpu"), 3, 1, NULL,
                           pcpu, start_here, lcpu);
        if (status != 0) {
                printk(KERN_ERR "start-cpu failed: %i\n", status);
                return 0;
        }
        return 1;
}
#else /* ... CONFIG_HOTPLUG_CPU */
static inline int __devinit smp_startup_cpu(unsigned int lcpu)
{
        return 1;
}
#endif /* CONFIG_HOTPLUG_CPU */

static void smp_pSeries_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        if (!smp_startup_cpu(nr))
                return;

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero.  After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        paca[nr].cpu_start = 1;
}
#endif /* CONFIG_PPC_PSERIES */
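
/*
 * Stagger each cpu's next decrementer-driven jiffy update so the per-cpu
 * timer ticks are spread across a jiffy rather than all firing at once.
 */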
static void __init smp_space_timers(unsigned int max_cpus)
{
        int i;
        unsigned long offset = tb_ticks_per_jiffy / max_cpus;
        unsigned long previous_tb = paca[boot_cpuid].next_jiffy_update_tb;

        for_each_cpu(i) {
                if (i != boot_cpuid) {
                        paca[i].next_jiffy_update_tb =
                                previous_tb + offset;
                        previous_tb = paca[i].next_jiffy_update_tb;
                }
        }
}

#ifdef CONFIG_PPC_PSERIES
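
/*
 * On shared-processor LPARs each cpu registers its lppaca with the
 * hypervisor as a Virtual Processor Area; called for the boot cpu from
 * smp_init_pSeries() and for secondaries from start_secondary().
 */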
static void vpa_init(int cpu)
{
        unsigned long flags, pcpu = get_hard_smp_processor_id(cpu);

        /* Register the Virtual Processor Area (VPA) */
        flags = 1UL << (63 - 18);
        register_vpa(flags, pcpu, __pa((unsigned long)&(paca[cpu].lppaca)));
}

static inline void smp_xics_do_message(int cpu, int msg)
{
        set_bit(msg, &xics_ipi_message[cpu].value);
        mb();
        xics_cause_IPI(cpu);
}

static void smp_xics_message_pass(int target, int msg)
{
        unsigned int i;

        if (target < NR_CPUS) {
                smp_xics_do_message(target, msg);
        } else {
                for_each_online_cpu(i) {
                        if (target == MSG_ALL_BUT_SELF
                            && i == smp_processor_id())
                                continue;
                        smp_xics_do_message(i, msg);
                }
        }
}

extern void xics_request_IPIs(void);

static int __init smp_xics_probe(void)
{
#ifdef CONFIG_SMP
        xics_request_IPIs();
#endif

        return cpus_weight(cpu_possible_map);
}

static void __devinit smp_xics_setup_cpu(int cpu)
{
        if (cpu != boot_cpuid)
                xics_setup_cpu();
}

static spinlock_t timebase_lock = SPIN_LOCK_UNLOCKED;
static unsigned long timebase = 0;
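
/*
 * Timebase hand-off for non-LPAR pSeries: the boot cpu freezes the
 * timebase through RTAS, publishes its value in 'timebase', then spins
 * until the secondary has copied it with set_tb() and cleared the
 * variable, at which point the timebase is thawed again.
 */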
static void __devinit pSeries_give_timebase(void)
{
        spin_lock(&timebase_lock);
        rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
        timebase = get_tb();
        spin_unlock(&timebase_lock);

        while (timebase)
                barrier();
        rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
}

static void __devinit pSeries_take_timebase(void)
{
        while (!timebase)
                barrier();
        spin_lock(&timebase_lock);
        set_tb(timebase >> 32, timebase & 0xffffffff);
        timebase = 0;
        spin_unlock(&timebase_lock);
}

static struct smp_ops_t pSeries_openpic_smp_ops = {
        .message_pass   = smp_openpic_message_pass,
        .probe          = smp_openpic_probe,
        .kick_cpu       = smp_pSeries_kick_cpu,
        .setup_cpu      = smp_openpic_setup_cpu,
};

static struct smp_ops_t pSeries_xics_smp_ops = {
        .message_pass   = smp_xics_message_pass,
        .probe          = smp_xics_probe,
        .kick_cpu       = smp_pSeries_kick_cpu,
        .setup_cpu      = smp_xics_setup_cpu,
};

/* This is called very early */
void __init smp_init_pSeries(void)
{
        int ret, i;

        DBG(" -> smp_init_pSeries()\n");

        if (naca->interrupt_controller == IC_OPEN_PIC)
                smp_ops = &pSeries_openpic_smp_ops;
        else
                smp_ops = &pSeries_xics_smp_ops;

        /* Start secondary threads on SMT systems; primary threads
         * are already in the running state.
         */
        for_each_present_cpu(i) {
                if (query_cpu_stopped(get_hard_smp_processor_id(i)) == 0) {
                        printk("%16.16x : starting thread\n", i);
                        DBG("%16.16x : starting thread\n", i);
                        rtas_call(rtas_token("start-cpu"), 3, 1, &ret,
                                  get_hard_smp_processor_id(i),
                                  __pa((u32)*((unsigned long *)
                                              pseries_secondary_smp_init)),
                                  i);
                }
        }

        if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
                vpa_init(boot_cpuid);

        /* Non-lpar has additional take/give timebase */
        if (systemcfg->platform == PLATFORM_PSERIES) {
                smp_ops->give_timebase = pSeries_give_timebase;
                smp_ops->take_timebase = pSeries_take_timebase;
        }

        DBG(" <- smp_init_pSeries()\n");
}
#endif /* CONFIG_PPC_PSERIES */

void smp_local_timer_interrupt(struct pt_regs * regs)
{
        update_process_times(user_mode(regs));
}

void smp_message_recv(int msg, struct pt_regs *regs)
{
        switch(msg) {
        case PPC_MSG_CALL_FUNCTION:
                smp_call_function_interrupt();
                break;
        case PPC_MSG_RESCHEDULE:
                /* XXX Do we have to do this? */
                set_need_resched();
                break;
#if 0
        case PPC_MSG_MIGRATE_TASK:
                /* spare */
                break;
#endif
#ifdef CONFIG_DEBUGGER
        case PPC_MSG_DEBUGGER_BREAK:
                debugger_ipi(regs);
                break;
#endif
        default:
                printk("SMP %d: smp_message_recv(): unknown msg %d\n",
                       smp_processor_id(), msg);
                break;
        }
}

void smp_send_reschedule(int cpu)
{
        smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
        smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

static void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        while (1)
                ;
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 * Stolen from the i386 version.
 */
static spinlock_t call_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

static struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
} *call_data;
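
/* call_data is published under call_lock; each receiving cpu bumps
 * 'started' once it has copied func/info and 'finished' (when wait is
 * set) once func has returned, which is what the initiator polls on. */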

/* delay of at least 8 seconds on 1GHz cpu */
#define SMP_CALL_TIMEOUT (1UL << (30 + 3))

/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
                       int wait)
{
        struct call_data_struct data;
        int ret = -1, cpus;
        unsigned long timeout;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock(&call_lock);
        /* Must grab online cpu count with preempt disabled, otherwise
         * it can change. */
        cpus = num_online_cpus() - 1;
        if (!cpus) {
                ret = 0;
                goto out;
        }

        call_data = &data;
        wmb();
        /* Send a message to all other CPUs and wait for them to respond */
        smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);

        /* Wait for response */
        timeout = SMP_CALL_TIMEOUT;
        while (atomic_read(&data.started) != cpus) {
                HMT_low();
                if (--timeout == 0) {
                        printk("smp_call_function on cpu %d: other cpus not "
                               "responding (%d)\n", smp_processor_id(),
                               atomic_read(&data.started));
                        debugger(NULL);
                        goto out;
                }
        }

        if (wait) {
                timeout = SMP_CALL_TIMEOUT;
                while (atomic_read(&data.finished) != cpus) {
                        HMT_low();
                        if (--timeout == 0) {
                                printk("smp_call_function on cpu %d: other "
                                       "cpus not finishing (%d/%d)\n",
                                       smp_processor_id(),
                                       atomic_read(&data.finished),
                                       atomic_read(&data.started));
                                debugger(NULL);
                                goto out;
                        }
                }
        }

        ret = 0;

out:
        call_data = NULL;
        HMT_medium();
        spin_unlock(&call_lock);
        return ret;
}

EXPORT_SYMBOL(smp_call_function);
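
/*
 * IPI handler for PPC_MSG_CALL_FUNCTION: runs on every other cpu in
 * response to smp_call_function() above.
 */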
void smp_call_function_interrupt(void)
{
        void (*func) (void *info);
        void *info;
        int wait;

        /* call_data will be NULL if the sender timed out while
         * waiting on us to receive the call.
         */
        if (!call_data)
                return;

        func = call_data->func;
        info = call_data->info;
        wait = call_data->wait;

        if (!wait)
                smp_mb__before_atomic_inc();

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        (*func)(info);
        if (wait) {
                smp_mb__before_atomic_inc();
                atomic_inc(&call_data->finished);
        }
}

extern unsigned long decr_overclock;
extern struct gettimeofday_struct do_gtod;

struct thread_info *current_set[NR_CPUS];

DECLARE_PER_CPU(unsigned int, pvr);

static void __devinit smp_store_cpu_info(int id)
{
        per_cpu(pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)
{
        struct task_struct *p;

        /* create a process for the processor */
        p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
        paca[cpu].__current = p;
        current_set[cpu] = p->thread_info;
}
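
/*
 * Called once before secondaries are started: record boot cpu info, probe
 * the platform for the number of cpus, space out the per-cpu timer ticks
 * and fork an idle task for every possible secondary.
 */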
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        /*
         * setup_cpu may need to be called on the boot cpu. We haven't
         * spun any cpus up but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        /* Fixup boot cpu */
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

#ifndef CONFIG_PPC_ISERIES
        paca[boot_cpuid].next_jiffy_update_tb = tb_last_stamp = get_tb();

        /*
         * Should update do_gtod.stamp_xsec.
         * For now we leave it which means the time can be some
         * number of msecs off until someone does a settimeofday()
         */
        do_gtod.tb_orig_stamp = tb_last_stamp;
        systemcfg->tb_orig_stamp = tb_last_stamp;
#endif

        max_cpus = smp_ops->probe();

        /* Backup CPU 0 state if necessary */
        __save_cpu_setup();

        smp_space_timers(max_cpus);

        for_each_cpu(cpu)
                if (cpu != boot_cpuid)
                        smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != boot_cpuid);

        cpu_set(boot_cpuid, cpu_online_map);

        paca[boot_cpuid].__current = current;
        current_set[boot_cpuid] = current->thread_info;
}
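
/*
 * Bring one secondary online: set up its decrementer and (on pre-SLB
 * cpus) its segment table, kick it via the platform hook, then wait for
 * it to call in and appear in cpu_online_map.
 */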
int __devinit __cpu_up(unsigned int cpu)
{
        int c;

        /* At boot, don't bother with non-present cpus -JSCHOPP */
        if (system_state < SYSTEM_RUNNING && !cpu_present(cpu))
                return -ENOENT;

        paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;

        if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
                void *tmp;

                /* maximum of 48 CPUs on machines with a segment table */
                if (cpu >= 48)
                        BUG();

                tmp = &stab_array[PAGE_SIZE * cpu];
                memset(tmp, 0, PAGE_SIZE);
                paca[cpu].stab_addr = (unsigned long)tmp;
                paca[cpu].stab_real = virt_to_abs(tmp);
        }

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        mb();

        /* wake up cpus */
        smp_ops->kick_cpu(cpu);

        /*
         * wait to see if the cpu made a callin (is actually up).
         * use this value that I found through experimentation.
         * -- Cort
         */
        if (system_state < SYSTEM_RUNNING)
                for (c = 5000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
        else
                /*
                 * CPUs can take much longer to come up in the
                 * hotplug case.  Wait five seconds.
                 */
                for (c = 25; c && !cpu_callin_map[cpu]; c--) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(HZ/5);
                }
#endif

        if (!cpu_callin_map[cpu]) {
                printk("Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        printk("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online map */
        while (!cpu_online(cpu))
                cpu_relax();

        return 0;
}

extern unsigned int default_distrib_server;

/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
        unsigned int cpu = smp_processor_id();

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(paca[cpu].default_decr);
        cpu_callin_map[cpu] = 1;

        smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

#ifdef CONFIG_PPC_PSERIES
        if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
                vpa_init(cpu);
        }

#ifdef CONFIG_IRQ_ALL_CPUS
        /* Put the calling processor into the GIQ.  This is really only
         * necessary from a secondary thread as the OF start-cpu interface
         * performs this function for us on primary threads.
         */
        /* TODO: 9005 is #defined in rtas-proc.c -- move to a header */
        rtas_set_indicator(9005, default_distrib_server, 1);
#endif
#endif

        spin_lock(&call_lock);
        cpu_set(cpu, cpu_online_map);
        spin_unlock(&call_lock);

        local_irq_enable();

        return cpu_idle(NULL);
}

int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}
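
/* Final SMP fixups, run once all secondaries have been brought up. */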
void __init smp_cpus_done(unsigned int max_cpus)
{
        cpumask_t old_mask;

        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the meantime
         * so we pin us down to CPU 0 for a short while
         */
        old_mask = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

        smp_ops->setup_cpu(boot_cpuid);

        /* XXX fix this, xics currently relies on it - Anton */
        smp_threads_ready = 1;

        set_cpus_allowed(current, old_mask);

        /*
         * We know at boot the maximum number of cpus we can add to
         * a partition and set cpu_possible_map accordingly. cpu_present_map
         * needs to match for the hotplug code to allow us to hot add
         * any offline cpus.
         */
        cpu_present_map = cpu_possible_map;
}