/*
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/pgtable.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvCall.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/ppcdebug.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/plpar_wrappers.h>

#define DBG(fmt...) udbg_printf(fmt)
int smp_threads_ready;
unsigned long cache_decay_ticks;

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);

struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

extern unsigned char stab_array[];

extern int cpu_idle(void *unused);
void smp_call_function_interrupt(void);
extern long register_vpa(unsigned long flags, unsigned long proc,
                         unsigned long vpa);

int smt_enabled_at_boot = 1;

/* Low level assembly function used to backup CPU 0 state */
extern void __save_cpu_setup(void);

extern void pseries_secondary_smp_init(unsigned long);
#ifdef CONFIG_PPC_ISERIES
static unsigned long iSeries_smp_message[NR_CPUS];

void iSeries_smp_message_recv( struct pt_regs * regs )
{
        int cpu = smp_processor_id();
        int msg;

        if ( num_online_cpus() < 2 )
                return;

        for ( msg = 0; msg < 4; ++msg )
                if ( test_and_clear_bit( msg, &iSeries_smp_message[cpu] ) )
                        smp_message_recv( msg, regs );
}
static inline void smp_iSeries_do_message(int cpu, int msg)
{
        set_bit(msg, &iSeries_smp_message[cpu]);
        HvCall_sendIPI(&(paca[cpu]));
}

static void smp_iSeries_message_pass(int target, int msg)
{
        int i;

        if (target < NR_CPUS)
                smp_iSeries_do_message(target, msg);
        else {
                for_each_online_cpu(i) {
                        if (target == MSG_ALL_BUT_SELF
                            && i == smp_processor_id())
                                continue;
                        smp_iSeries_do_message(i, msg);
                }
        }
}
static int smp_iSeries_numProcs(void)
{
        unsigned np, i;

        np = 0;
        for (i=0; i < NR_CPUS; ++i) {
                if (paca[i].lppaca.xDynProcStatus < 2) {
                        cpu_set(i, cpu_possible_map);
                        cpu_set(i, cpu_present_map);
                        ++np;
                }
        }
        return np;
}
static int smp_iSeries_probe(void)
{
        unsigned i;
        unsigned np = 0;

        for (i=0; i < NR_CPUS; ++i) {
                if (paca[i].lppaca.xDynProcStatus < 2) {
                        /*paca[i].active = 1;*/
                        ++np;
                }
        }

        return np;
}
static void smp_iSeries_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /* Verify that our partition has a processor nr */
        if (paca[nr].lppaca.xDynProcStatus >= 2)
                return;

        /* The processor is currently spinning, waiting
         * for the cpu_start field to become non-zero.
         * After we set cpu_start, the processor will
         * continue on to secondary_start in iSeries_head.S
         */
        paca[nr].cpu_start = 1;
}
static void __devinit smp_iSeries_setup_cpu(int nr)
{
}

static struct smp_ops_t iSeries_smp_ops = {
        .message_pass = smp_iSeries_message_pass,
        .probe        = smp_iSeries_probe,
        .kick_cpu     = smp_iSeries_kick_cpu,
        .setup_cpu    = smp_iSeries_setup_cpu,
};

/* This is called very early. */
void __init smp_init_iSeries(void)
{
        smp_ops = &iSeries_smp_ops;
        systemcfg->processorCount = smp_iSeries_numProcs();
}
#endif /* CONFIG_PPC_ISERIES */
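
/*
 * Each platform supplies a struct smp_ops_t like the one above and
 * installs it in the global smp_ops pointer from its early
 * smp_init_*() routine.  The generic code later in this file only ever
 * calls smp_ops->probe(), ->kick_cpu(), ->setup_cpu() and
 * ->message_pass(), so it stays independent of the underlying
 * interrupt controller and firmware interface.
 */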
#ifdef CONFIG_PPC_MULTIPLATFORM
void smp_openpic_message_pass(int target, int msg)
{
        /* make sure we're sending something that translates to an IPI */
        if (msg > 0x3) {
                printk("SMP %d: smp_message_pass: unknown msg %d\n",
                       smp_processor_id(), msg);
                return;
        }
        switch (target) {
        case MSG_ALL:
                openpic_cause_IPI(msg, 0xffffffff);
                break;
        case MSG_ALL_BUT_SELF:
                openpic_cause_IPI(msg,
                                  0xffffffff & ~(1 << smp_processor_id()));
                break;
        default:
                openpic_cause_IPI(msg, 1<<target);
                break;
        }
}
static int __init smp_openpic_probe(void)
{
        int nr_cpus;

        nr_cpus = cpus_weight(cpu_possible_map);

        if (nr_cpus > 1)
                openpic_request_IPIs();

        return nr_cpus;
}

static void __devinit smp_openpic_setup_cpu(int cpu)
{
        do_openpic_setup_cpu();
}

#endif /* CONFIG_PPC_MULTIPLATFORM */
#ifdef CONFIG_PPC_PSERIES

/* Get state of physical CPU.
 * Return codes:
 *	0	- The processor is in the RTAS stopped state
 *	1	- stop-self is in progress
 *	2	- The processor is not in the RTAS stopped state
 *	-1	- Hardware Error
 *	-2	- Hardware Busy, Try again later.
 */
int query_cpu_stopped(unsigned int pcpu)
{
        int cpu_status;
        int status, qcss_tok;

        DBG(" -> query_cpu_stopped(%d)\n", pcpu);
        qcss_tok = rtas_token("query-cpu-stopped-state");
        if (qcss_tok == RTAS_UNKNOWN_SERVICE)
                return -1;
        status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
        if (status != 0) {
                printk(KERN_ERR
                       "RTAS query-cpu-stopped-state failed: %i\n", status);
                return status;
        }

        DBG(" <- query_cpu_stopped(), status: %d\n", cpu_status);

        return cpu_status;
}
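
/*
 * The hotplug path below (__cpu_die) polls query_cpu_stopped() until
 * the dying cpu either reaches the RTAS stopped state (0) or reports a
 * hardware error (-1), sleeping HZ/5 between attempts.
 */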
#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
        /* FIXME: go put this in a header somewhere */
        extern void xics_migrate_irqs_away(void);

        systemcfg->processorCount--;

        /*fix boot_cpuid here*/
        if (smp_processor_id() == boot_cpuid)
                boot_cpuid = any_online_cpu(cpu_online_map);

        /* FIXME: abstract this to not be platform specific later on */
        xics_migrate_irqs_away();
        return 0;
}
void __cpu_die(unsigned int cpu)
{
        int tries;
        int cpu_status;
        unsigned int pcpu = get_hard_smp_processor_id(cpu);

        for (tries = 0; tries < 25; tries++) {
                cpu_status = query_cpu_stopped(pcpu);
                if (cpu_status == 0 || cpu_status == -1)
                        break;
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(HZ/5);
        }
        if (cpu_status != 0) {
                printk("Querying DEAD? cpu %i (%i) shows %i\n",
                       cpu, pcpu, cpu_status);
        }

        /* Isolation and deallocation are definitely done by
         * drslot_chrp_cpu.  If they were not they would be
         * done here.  Change isolate state to Isolate and
         * change allocation-state to Unusable.
         */
        paca[cpu].cpu_start = 0;

        /* So we can recognize if it fails to come up next time. */
        cpu_callin_map[cpu] = 0;
}
/* Kill this cpu */
void cpu_die(void)
{
        local_irq_disable();
        /* Some hardware requires clearing the CPPR, while other hardware
         * does not; it is safe either way.
         */
        pSeriesLP_cppr_info(0, 0);
        rtas_stop_self();
        /* Should never get here... */
        BUG();
        for (;;)
                ;
}
/* Search all cpu device nodes for an offline logical cpu.  If a
 * device node has a "ibm,my-drc-index" property (meaning this is an
 * LPAR), paranoid-check whether we own the cpu.  For each "thread"
 * of a cpu, if it is offline and has the same hw index as before,
 * grab that in preference.
 */
static unsigned int find_physical_cpu_to_start(unsigned int old_hwindex)
{
        struct device_node *np = NULL;
        unsigned int best = -1U;

        while ((np = of_find_node_by_type(np, "cpu"))) {
                int nr_threads, len;
                u32 *index = (u32 *)get_property(np, "ibm,my-drc-index", NULL);
                u32 *tid = (u32 *)
                        get_property(np, "ibm,ppc-interrupt-server#s", &len);

                if (!tid)
                        tid = (u32 *)get_property(np, "reg", &len);

                if (!tid)
                        continue;

                /* If there is a drc-index, make sure that we own
                 * the cpu.
                 */
                if (index) {
                        int state;
                        int rc = rtas_get_sensor(9003, *index, &state);
                        if (rc != 0 || state != 1)
                                continue;
                }

                nr_threads = len / sizeof(u32);

                while (nr_threads--) {
                        if (0 == query_cpu_stopped(tid[nr_threads])) {
                                best = tid[nr_threads];
                                if (best == old_hwindex)
                                        goto out;
                        }
                }
        }
out:
        of_node_put(np);
        return best;
}
/**
 * smp_startup_cpu() - start the given cpu
 *
 * At boot time, there is nothing to do.  At run-time, call RTAS with
 * the appropriate start location, if the cpu is in the RTAS stopped
 * state.
 *
 * Returns:
 *	0	- failure
 *	1	- success
 */
static inline int __devinit smp_startup_cpu(unsigned int lcpu)
{
        int status;
        unsigned long start_here = __pa((u32)*((unsigned long *)
                                                pseries_secondary_smp_init));
        unsigned int pcpu;

        /* At boot time the cpus are already spinning in hold
         * loops, so nothing to do. */
        if (system_state < SYSTEM_RUNNING)
                return 1;

        pcpu = find_physical_cpu_to_start(get_hard_smp_processor_id(lcpu));
        if (pcpu == -1U) {
                printk(KERN_INFO "No more cpus available, failing\n");
                return 0;
        }

        /* Fixup atomic count: it exited inside IRQ handler. */
        paca[lcpu].__current->thread_info->preempt_count = 0;

        /* At boot this is done in prom.c. */
        paca[lcpu].hw_cpu_id = pcpu;

        status = rtas_call(rtas_token("start-cpu"), 3, 1, NULL,
                           pcpu, start_here, lcpu);
        if (status != 0) {
                printk(KERN_ERR "start-cpu failed: %i\n", status);
                return 0;
        }

        return 1;
}
#else /* ... CONFIG_HOTPLUG_CPU */
static inline int __devinit smp_startup_cpu(unsigned int lcpu)
{
        return 1;
}
#endif /* CONFIG_HOTPLUG_CPU */
static void smp_pSeries_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        if (!smp_startup_cpu(nr))
                return;

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero.  After we set cpu_start,
         * the processor will continue on to secondary_start
         */
        paca[nr].cpu_start = 1;
}
#endif /* CONFIG_PPC_PSERIES */
static void __init smp_space_timers(unsigned int max_cpus)
{
        int i;
        unsigned long offset = tb_ticks_per_jiffy / max_cpus;
        unsigned long previous_tb = paca[boot_cpuid].next_jiffy_update_tb;

        for_each_cpu(i) {
                if (i != boot_cpuid) {
                        paca[i].next_jiffy_update_tb =
                                previous_tb + offset;
                        previous_tb = paca[i].next_jiffy_update_tb;
                }
        }
}
#ifdef CONFIG_PPC_PSERIES
static void vpa_init(int cpu)
{
        unsigned long flags, pcpu = get_hard_smp_processor_id(cpu);

        /* Register the Virtual Processor Area (VPA) */
        flags = 1UL << (63 - 18);
        register_vpa(flags, pcpu, __pa((unsigned long)&(paca[cpu].lppaca)));
}
static inline void smp_xics_do_message(int cpu, int msg)
{
        set_bit(msg, &xics_ipi_message[cpu].value);
        mb();
        xics_cause_IPI(cpu);
}

static void smp_xics_message_pass(int target, int msg)
{
        unsigned int i;

        if (target < NR_CPUS) {
                smp_xics_do_message(target, msg);
        } else {
                for_each_online_cpu(i) {
                        if (target == MSG_ALL_BUT_SELF
                            && i == smp_processor_id())
                                continue;
                        smp_xics_do_message(i, msg);
                }
        }
}
extern void xics_request_IPIs(void);

static int __init smp_xics_probe(void)
{
        xics_request_IPIs();

        return cpus_weight(cpu_possible_map);
}

static void __devinit smp_xics_setup_cpu(int cpu)
{
        if (cpu != boot_cpuid)
                xics_setup_cpu();
}
static spinlock_t timebase_lock = SPIN_LOCK_UNLOCKED;
static unsigned long timebase = 0;

static void __devinit pSeries_give_timebase(void)
{
        spin_lock(&timebase_lock);
        rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
        timebase = get_tb();
        spin_unlock(&timebase_lock);

        while (timebase)
                barrier();
        rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
}

static void __devinit pSeries_take_timebase(void)
{
        while (!timebase)
                barrier();
        spin_lock(&timebase_lock);
        set_tb(timebase >> 32, timebase & 0xffffffff);
        timebase = 0;
        spin_unlock(&timebase_lock);
}
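
/*
 * The two routines above hand the timebase from the boot cpu to a new
 * cpu through the shared 'timebase' variable: the giver freezes the
 * timebase via RTAS, publishes its current value and waits for it to
 * be cleared, while the taker spins until a value appears, loads it
 * with set_tb() and clears the variable, allowing the giver to thaw
 * the timebase again.
 */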
static struct smp_ops_t pSeries_openpic_smp_ops = {
        .message_pass	= smp_openpic_message_pass,
        .probe		= smp_openpic_probe,
        .kick_cpu	= smp_pSeries_kick_cpu,
        .setup_cpu	= smp_openpic_setup_cpu,
};

static struct smp_ops_t pSeries_xics_smp_ops = {
        .message_pass	= smp_xics_message_pass,
        .probe		= smp_xics_probe,
        .kick_cpu	= smp_pSeries_kick_cpu,
        .setup_cpu	= smp_xics_setup_cpu,
};
/* This is called very early */
void __init smp_init_pSeries(void)
{
        int i, ret;

        DBG(" -> smp_init_pSeries()\n");

        if (naca->interrupt_controller == IC_OPEN_PIC)
                smp_ops = &pSeries_openpic_smp_ops;
        else
                smp_ops = &pSeries_xics_smp_ops;

        /* Start secondary threads on SMT systems; primary threads
         * are already in the running state.
         */
        for_each_present_cpu(i) {
                if (query_cpu_stopped(get_hard_smp_processor_id(i)) == 0) {
                        printk("%16.16x : starting thread\n", i);
                        DBG("%16.16x : starting thread\n", i);
                        rtas_call(rtas_token("start-cpu"), 3, 1, &ret,
                                  get_hard_smp_processor_id(i),
                                  __pa((u32)*((unsigned long *)
                                              pseries_secondary_smp_init)),
                                  i);
                }
        }

        if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
                vpa_init(boot_cpuid);

        /* Non-lpar has additional take/give timebase */
        if (systemcfg->platform == PLATFORM_PSERIES) {
                smp_ops->give_timebase = pSeries_give_timebase;
                smp_ops->take_timebase = pSeries_take_timebase;
        }

        DBG(" <- smp_init_pSeries()\n");
}
#endif /* CONFIG_PPC_PSERIES */
void smp_local_timer_interrupt(struct pt_regs * regs)
{
        update_process_times(user_mode(regs));
}
void smp_message_recv(int msg, struct pt_regs *regs)
{
        switch (msg) {
        case PPC_MSG_CALL_FUNCTION:
                smp_call_function_interrupt();
                break;
        case PPC_MSG_RESCHEDULE:
                /* XXX Do we have to do this? */
                set_need_resched();
                break;
        case PPC_MSG_MIGRATE_TASK:
                break;
#ifdef CONFIG_DEBUGGER
        case PPC_MSG_DEBUGGER_BREAK:
                debugger_ipi(regs);
                break;
#endif
        default:
                printk("SMP %d: smp_message_recv(): unknown msg %d\n",
                       smp_processor_id(), msg);
                break;
        }
}
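
/*
 * This is the receive side of the IPI mechanism: a sender calls
 * smp_ops->message_pass(cpu, PPC_MSG_*), the platform interrupt code
 * invokes smp_message_recv() on the target cpu, and the switch above
 * dispatches to the matching handler.
 */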
void smp_send_reschedule(int cpu)
{
        smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
        smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

static void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        while (1)
                ;
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);
}
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 * Stolen from the i386 version.
 */
static spinlock_t call_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

static struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
} *call_data;

/* delay of at least 8 seconds on 1GHz cpu */
#define SMP_CALL_TIMEOUT (1UL << (30 + 3))
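
/*
 * 1UL << (30 + 3) is about 8.6e9 iterations of the polling loops in
 * smp_call_function(); assuming at least one cycle per iteration on a
 * 1GHz cpu, that gives the "at least 8 seconds" quoted above.
 */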
/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
                       int wait)
{
        struct call_data_struct data;
        int ret = -1, cpus;
        unsigned long timeout;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock(&call_lock);
        /* Must grab online cpu count with preempt disabled, otherwise
         * it can change. */
        cpus = num_online_cpus() - 1;
        if (!cpus) {
                ret = 0;
                goto out;
        }

        call_data = &data;
        wmb();
        /* Send a message to all other CPUs and wait for them to respond */
        smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);

        /* Wait for response */
        timeout = SMP_CALL_TIMEOUT;
        while (atomic_read(&data.started) != cpus) {
                if (--timeout == 0) {
                        printk("smp_call_function on cpu %d: other cpus not "
                               "responding (%d)\n", smp_processor_id(),
                               atomic_read(&data.started));
                        goto out;
                }
        }

        if (wait) {
                timeout = SMP_CALL_TIMEOUT;
                while (atomic_read(&data.finished) != cpus) {
                        if (--timeout == 0) {
                                printk("smp_call_function on cpu %d: other "
                                       "cpus not finishing (%d/%d)\n",
                                       smp_processor_id(),
                                       atomic_read(&data.finished),
                                       atomic_read(&data.started));
                                goto out;
                        }
                }
        }

        ret = 0;

out:
        call_data = NULL;
        spin_unlock(&call_lock);
        return ret;
}

EXPORT_SYMBOL(smp_call_function);
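
/*
 * Hypothetical usage sketch (the names below are illustrative and not
 * part of this file).  The handler must be fast and non-blocking, and
 * the call must be made with interrupts enabled, outside interrupt
 * context:
 *
 *	static atomic_t hits;
 *
 *	static void count_cpu(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	...
 *	smp_call_function(count_cpu, &hits, 0, 1);
 */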
void smp_call_function_interrupt(void)
{
        void (*func) (void *info);
        void *info;
        int wait;

        /* call_data will be NULL if the sender timed out while
         * waiting on us to receive the call.
         */
        if (!call_data)
                return;

        func = call_data->func;
        info = call_data->info;
        wait = call_data->wait;

        smp_mb__before_atomic_inc();

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        (*func)(info);
        if (wait) {
                smp_mb__before_atomic_inc();
                atomic_inc(&call_data->finished);
        }
}
extern unsigned long decr_overclock;
extern struct gettimeofday_struct do_gtod;

struct thread_info *current_set[NR_CPUS];

DECLARE_PER_CPU(unsigned int, pvr);

static void __devinit smp_store_cpu_info(int id)
{
        per_cpu(pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)
{
        struct task_struct *p;

        /* create a process for the processor */
        p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
        paca[cpu].__current = p;
        current_set[cpu] = p->thread_info;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        /*
         * setup_cpu may need to be called on the boot cpu. We haven't
         * spun any cpus up but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

#ifndef CONFIG_PPC_ISERIES
        paca[boot_cpuid].next_jiffy_update_tb = tb_last_stamp = get_tb();

        /*
         * Should update do_gtod.stamp_xsec.
         * For now we leave it which means the time can be some
         * number of msecs off until someone does a settimeofday()
         */
        do_gtod.tb_orig_stamp = tb_last_stamp;
        systemcfg->tb_orig_stamp = tb_last_stamp;
#endif

        max_cpus = smp_ops->probe();

        /* Backup CPU 0 state if necessary */
        __save_cpu_setup();

        smp_space_timers(max_cpus);

        for_each_cpu(cpu)
                if (cpu != boot_cpuid)
                        smp_create_idle(cpu);
}
void __devinit smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != boot_cpuid);

        cpu_set(boot_cpuid, cpu_online_map);

        paca[boot_cpuid].__current = current;
        current_set[boot_cpuid] = current->thread_info;
}
int __devinit __cpu_up(unsigned int cpu)
{
        int c;

        /* At boot, don't bother with non-present cpus -JSCHOPP */
        if (system_state < SYSTEM_RUNNING && !cpu_present(cpu))
                return -ENOENT;

        paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;

        if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
                void *tmp;

                /* maximum of 48 CPUs on machines with a segment table */
                if (cpu >= 48)
                        BUG();

                tmp = &stab_array[PAGE_SIZE * cpu];
                memset(tmp, 0, PAGE_SIZE);
                paca[cpu].stab_addr = (unsigned long)tmp;
                paca[cpu].stab_real = virt_to_abs(tmp);
        }

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        mb();

        /* wake up cpu */
        smp_ops->kick_cpu(cpu);

        /*
         * wait to see if the cpu made a callin (is actually up).
         * use this value that I found through experimentation.
         */
        if (system_state < SYSTEM_RUNNING)
                for (c = 5000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
        else
                /*
                 * CPUs can take much longer to come up in the
                 * hotplug case.  Wait five seconds.
                 */
                for (c = 25; c && !cpu_callin_map[cpu]; c--) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(HZ/5);
                }
#endif

        if (!cpu_callin_map[cpu]) {
                printk("Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        printk("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online map */
        while (!cpu_online(cpu))
                cpu_relax();

        return 0;
}
extern unsigned int default_distrib_server;
/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
        unsigned int cpu = smp_processor_id();

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(paca[cpu].default_decr);
        cpu_callin_map[cpu] = 1;

        smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

#ifdef CONFIG_PPC_PSERIES
        if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
                vpa_init(cpu);
        }

#ifdef CONFIG_IRQ_ALL_CPUS
        /* Put the calling processor into the GIQ.  This is really only
         * necessary from a secondary thread as the OF start-cpu interface
         * performs this function for us on primary threads.
         */
        /* TODO: 9005 is #defined in rtas-proc.c -- move to a header */
        rtas_set_indicator(9005, default_distrib_server, 1);
#endif
#endif

        spin_lock(&call_lock);
        cpu_set(cpu, cpu_online_map);
        spin_unlock(&call_lock);

        local_irq_enable();

        return cpu_idle(NULL);
}
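
/*
 * The secondary never owns a user mm: it bumps init_mm.mm_count and
 * runs with active_mm = &init_mm (lazy TLB), which is why
 * start_secondary() can drop straight into cpu_idle() without any
 * further mm setup.
 */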
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
        cpumask_t old_mask;

        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the meantime
         * so we pin us down to CPU 0 for a short while
         */
        old_mask = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

        smp_ops->setup_cpu(boot_cpuid);

        /* XXX fix this, xics currently relies on it - Anton */
        smp_threads_ready = 1;

        set_cpus_allowed(current, old_mask);

        /*
         * We know at boot the maximum number of cpus we can add to
         * a partition and set cpu_possible_map accordingly. cpu_present_map
         * needs to match for the hotplug code to allow us to hot add
         * any offline cpus.
         */
        cpu_present_map = cpu_possible_map;
}