/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *	Tigran Aivazian		:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *	Andi Kleen		:	Changed for SMP boot into long mode.
 *	Martin J. Bligh		:	Added support for multi-quad systems
 *	Dave Jones		:	Report invalid combinations of Athlon CPUs.
 *	Rusty Russell		:	Hacked into shape for new "hotplug" boot process.
 *	Andi Kleen		:	Converted to new state machine.
 *	Ashok Raj		:	CPU hotplug support
 *	Glauber Costa		:	i386 and x86_64 integration
 */
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include <linux/mc146818rtc.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
/*
 * FIXME: For x86_64, those are defined in other files. But moving them here
 * would make the setup areas dependent on smp, which is a loss. When we
 * integrate apic between arches, we can probably do a better job, but
 * right now, they'll stay here -- glommer
 */
/* which logical CPU number maps to which CPU (physical APIC ID) */
u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata =
			{ [0 ... NR_CPUS-1] = BAD_APICID };
void *x86_cpu_to_apicid_early_ptr;
DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);

u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata
			= { [0 ... NR_CPUS-1] = BAD_APICID };
void *x86_bios_cpu_apicid_early_ptr;
DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
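/*
 * Note on the *_early_ptr variables above: very early in boot the per-cpu
 * areas are not set up yet, so the APIC IDs are collected in the __initdata
 * arrays first. The early pointers let setup code reach those arrays until
 * the values are copied into the real per-cpu variables. (This describes
 * the intended mechanism; the copy-over itself happens in setup code
 * outside this file.)
 */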
/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;

u8 apicid_2_node[MAX_APICID];
/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/* Store all idle threads, this can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality
 * for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
#else
struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
#endif
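/*
 * The idle task of an offlined CPU is parked, not destroyed: when the CPU
 * comes back, do_boot_cpu() below finds it again via get_idle_for_cpu() and
 * skips the fork_idle() path entirely.
 */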
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);
static atomic_t init_deasserted;

static int boot_cpu_logical_apicid;

/* ready for x86_64, no harm for x86, since it will overwrite after alloc */
unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* Set if we find a B stepping CPU */
int __cpuinitdata smp_b_stepping;
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)

/* which logical CPUs are on which nodes */
cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
EXPORT_SYMBOL(node_to_cpumask_map);
/* which node each logical CPU is on */
int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_to_node_map);

/* set up a mapping between cpu and node. */
static void map_cpu_to_node(int cpu, int node)
{
	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
	cpu_set(cpu, node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = node;
}

/* undo a mapping between cpu and node. */
static void unmap_cpu_to_node(int cpu)
{
	int node;

	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node++)
		cpu_clear(cpu, node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = 0;
}
#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
#define map_cpu_to_node(cpu, node)	({})
#define unmap_cpu_to_node(cpu)	({})
#endif
#ifdef CONFIG_X86_32
u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
					{ [0 ... NR_CPUS-1] = BAD_APICID };

void map_cpu_to_logical_apicid(void)
{
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();
	int node = apicid_to_node(apicid);

	if (!node_online(node))
		node = first_online_node;

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, node);
}

void unmap_cpu_to_logical_apicid(int cpu)
{
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
}
#else
#define unmap_cpu_to_logical_apicid(cpu) do {} while (0)
#define map_cpu_to_logical_apicid() do {} while (0)
#endif
/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC.  We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
					phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("%s: CPU%d started up but did not get a callout!\n",
		      __func__, cpuid);
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();
	setup_local_APIC();
	end_local_APIC_setup();
	map_cpu_to_logical_apicid();

	/*
	 * Need to enable IRQs because it can take longer and then
	 * the NMI watchdog might kill us.
	 */
	local_irq_enable();
	calibrate_delay();
	local_irq_disable();
	Dprintk("Stack at about %p\n", &cpuid);

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}
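/*
 * Handshake summary: do_boot_cpu() on the BSP sets cpu_callout_map and then
 * polls cpu_callin_map; the freshly started AP spins above until it sees
 * itself in cpu_callout_map, finishes its local APIC setup, and answers by
 * setting its own bit in cpu_callin_map.
 */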
/*
 * Activate a secondary processor.
 */
void __cpuinit start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(), SMP booting is too
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	preempt_disable();
	smp_callin();

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0();
		enable_8259A_irq(0);
	}

	/* This must be done before setting cpu_online_map */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	lock_ipi_call_lock();

	spin_lock(&vector_lock);

	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(smp_processor_id());
	/*
	 * Allow the master to continue.
	 */
	spin_unlock(&vector_lock);

	cpu_set(smp_processor_id(), cpu_online_map);
	unlock_ipi_call_lock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;

	setup_secondary_clock();

	cpu_idle();
}
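/*
 * The cpu_set() into cpu_online_map above is the AP's final "I'm alive"
 * signal: native_cpu_up() on the BSP spins on exactly this bit before it
 * returns, so everything an online CPU relies on must be initialized first.
 */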
/*
 * Everything has been set up for the secondary
 * CPUs - they just need to reload everything
 * from the task structure
 * This function must not return.
 */
void __devinit initialize_secondary(void)
{
	/*
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the ip.
	 */
	asm volatile(
		"movl %0,%%esp\n\t"
		"jmp *%1"
		:
		:"m" (current->thread.sp), "m" (current->thread.ip));
}
static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
{
	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3)
		/*
		 * Remember we have B step Pentia with bugs
		 */
		smp_b_stepping = 1;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

		if (num_possible_cpus() == 1)
			goto valid_k7;

		/* Athlon 660/661 is valid. */
		if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
		    (c->x86_mask == 1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model == 7) && (c->x86_mask == 0))
			goto valid_k7;

		/*
		 * Athlon 662, Duron 671, and Athlon >model 7 have capability
		 * bit. It's worth noting that the A5 stepping (662) of some
		 * Athlon XP's have the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
		 * more.
		 */
		if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
		    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
		     (c->x86_model > 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, not a certified SMP capable AMD system. */
		add_taint(TAINT_UNSAFE_SMP);
	}

valid_k7:
	;
}
void smp_checks(void)
{
	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable "
				    "with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (num_online_cpus())
			printk(KERN_INFO "WARNING: This combination of AMD "
				"processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}
}
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */
void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	if (id != 0)
		identify_secondary_cpu(c);
	smp_apply_quirks(c);
}
void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bringup a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}
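/*
 * Worked example (illustrative, not from the original source): on one
 * package with two cores and HT, i.e. 4 logical CPUs, once every CPU has
 * been through set_cpu_sibling_map(), each CPU's cpu_sibling_map holds the
 * 2 HT threads of its own core, cpu_core_map holds all 4 threads of the
 * package, and booted_cores ends up as 2 on every CPU in the package.
 */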
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For perf, we return last level cache shared map.
	 * And for power savings, we return cpu_core_map
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return per_cpu(cpu_core_map, cpu);
	else
		return c->llc_shared_map;
}
/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
unsigned long __cpuinit setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data,
	       trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}
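/*
 * The physical address returned here must be page-aligned and reachable in
 * real mode: wakeup_secondary_cpu() below encodes it into the STARTUP IPI
 * as (start_eip >> 12), so the AP begins executing the trampoline at that
 * page in real mode before switching to protected mode.
 */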
#ifdef CONFIG_X86_32
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	if (__pa(trampoline_base) >= 0x9F000)
		BUG();
}
#endif
static void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for_each_possible_cpu(cpu)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	Dprintk("Before bogocount - setting activated=1.\n");
}
static inline void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			printk(KERN_CONT
			       "a previous APIC delivery may have failed\n");

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk(KERN_CONT "%08x\n", status);
			break;
		default:
			printk(KERN_CONT "failed\n");
		}
	}
}
#ifdef WAKE_SECONDARY_VIA_NMI
/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the
 * normal INIT, INIT, STARTUP sequence will reset the chip hard for us, and
 * this won't ... remember to clear down the APIC, etc later.
 */
static int __devinit
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

	/* Boot on the stack */
	/* Kick the second */
	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	/*
	 * Due to the Pentium erratum 3AP.
	 */
	maxlvt = lapic_get_maxlvt();
	if (maxlvt > 3) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
	}
	accept_status = (apic_read(APIC_ESR) & 0xEF);
	Dprintk("NMI sent.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_NMI */
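/*
 * Which wakeup_secondary_cpu() variant gets built is decided per
 * subarchitecture via <mach_wakecpu.h> (included above): most platforms
 * use WAKE_SECONDARY_VIA_INIT for the standard INIT/STARTUP sequence,
 * while WAKE_SECONDARY_VIA_NMI is the NMI poke used with logical APIC
 * destinations on some subarches. (Orientation note; see mach_wakecpu.h
 * for the authoritative selection.)
 */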
#ifdef WAKE_SECONDARY_VIA_INIT
static int __devinit
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt, num_starts, j;

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();
	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Paravirt / VMI wants a startup IPI hook here to set up the
	 * target processor state.
	 */
	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
#ifdef CONFIG_X86_64
			 (unsigned long)init_rsp);
#else
			 (unsigned long)stack_start.sp);
#endif

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = lapic_get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_eip >> 12));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_INIT */
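/*
 * The sequence above follows the Intel MP spec's INIT-SIPI-SIPI protocol:
 * assert INIT (level-triggered) to reset the AP, deassert it, then send up
 * to two STARTUP IPIs carrying the trampoline page number. 82489DX-style
 * external APICs predate STARTUP IPIs, hence the APIC_INTEGRATED() check
 * that leaves num_starts at 0 for them.
 */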
struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};
static void __cpuinit do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}
static int __cpuinit do_boot_cpu(int apicid, int cpu)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
{
	unsigned long boot_error = 0;
	int timeout;
	unsigned long start_ip;
	unsigned short nmi_high = 0, nmi_low = 0;
	struct create_idle c_idle = {
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};
	INIT_WORK(&c_idle.work, do_fork_idle);
#ifdef CONFIG_X86_64
	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
	if (!cpu_gdt_descr[cpu].address &&
		!(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
		return -1;
	}

	/* Allocate node local memory for AP pdas */
	if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
		struct x8664_pda *newpda, *pda;
		int node = cpu_to_node(cpu);
		pda = cpu_pda(cpu);
		newpda = kmalloc_node(sizeof(struct x8664_pda), GFP_ATOMIC,
				      node);
		if (newpda) {
			memcpy(newpda, pda, sizeof(struct x8664_pda));
			cpu_pda(cpu) = newpda;
		} else
			printk(KERN_ERR
		"Could not allocate node local PDA for CPU %d on node %d\n",
				cpu, node);
	}
#endif

	alternatives_smp_switch(1);

	c_idle.idle = get_idle_for_cpu(cpu);

	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	if (c_idle.idle) {
		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
			(THREAD_SIZE +  task_stack_page(c_idle.idle))) - 1);
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle)) {
		printk("failed fork for CPU %d\n", cpu);
		return PTR_ERR(c_idle.idle);
	}

	set_idle_for_cpu(cpu, c_idle.idle);
do_rest:
#ifdef CONFIG_X86_32
	per_cpu(current_task, cpu) = c_idle.idle;
	init_gdt(cpu);
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	c_idle.idle->thread.ip = (unsigned long) start_secondary;
	/* Stack for startup_32 can be just as for start_secondary onwards */
	stack_start.sp = (void *) c_idle.idle->thread.sp;
	irq_ctx_init(cpu);
#else
	cpu_pda(cpu)->pcurrent = c_idle.idle;
	init_rsp = c_idle.idle->thread.sp;
	load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread);
	initial_code = (unsigned long)start_secondary;
	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
#endif

	/* start_ip had better be page-aligned! */
	start_ip = setup_trampoline();

	/* So we see what's up */
	printk(KERN_INFO "Booting processor %d/%d ip %lx\n",
			  cpu, apicid, start_ip);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	store_NMI_vector(&nmi_high, &nmi_low);

	smpboot_setup_warm_reset_vector(start_ip);
	/*
	 * Be paranoid about clearing APIC errors.
	 */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_cpu(apicid, start_ip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			printk(KERN_INFO "CPU%d: ", cpu);
			print_cpu_info(&cpu_data(cpu));
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				printk(KERN_ERR "Stuck ??\n");
			else
				/* trampoline code not run */
				printk(KERN_ERR "Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}

	if (boot_error) {
		/* Try to put things back the way they were before ... */
		unmap_cpu_to_logical_apicid(cpu);
#ifdef CONFIG_X86_64
		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
#endif
		cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */
		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
		cpu_clear(cpu, cpu_possible_map);
		cpu_clear(cpu, cpu_present_map);
		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	return boot_error;
}
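/*
 * Bring-up pipeline, for orientation: do_boot_cpu() copies the trampoline
 * and sends the wakeup IPIs; the AP runs the real-mode trampoline, enters
 * the kernel's startup code, and ends up in start_secondary(), whose
 * smp_callin() completes the callout/callin handshake polled in the 5s
 * loop above.
 */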
int __cpuinit native_cpu_up(unsigned int cpu)
{
	int apicid = cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpu_isset(cpu, cpu_callin_map)) {
		Dprintk("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

#ifdef CONFIG_X86_32
	/* init low mem mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
	flush_tlb_all();
#endif

	err = do_boot_cpu(apicid, cpu);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_isset(cpu, cpu_online_map)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

	return 0;
}
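/*
 * Note the rendezvous in the TSC check above: check_tsc_sync_source() here
 * on the BSP pairs with check_tsc_sync_target() in start_secondary(); both
 * sides wait for each other, which is why the AP must already be past
 * smp_callin() before we get here.
 */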
/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	cpu_present_map = cpumask_of_cpu(0);
	cpu_possible_map = cpumask_of_cpu(0);
#ifdef CONFIG_X86_32
	smpboot_clear_io_apic_irqs();
#endif
	if (smp_found_config)
		phys_cpu_present_map =
				physid_mask_of_physid(boot_cpu_physical_apicid);
	else
		phys_cpu_present_map = physid_mask_of_physid(0);
	map_cpu_to_logical_apicid();
	cpu_set(0, per_cpu(cpu_sibling_map, 0));
	cpu_set(0, per_cpu(cpu_core_map, 0));
}
/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk(KERN_WARNING "weird, boot CPU (#%d) not listed "
				    "by the BIOS.\n", hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		disable_smp();
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk(KERN_NOTICE
			"weird, boot CPU (#%d) not listed by the BIOS.\n",
			boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
	    !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. "
				"(tell your hw vendor)\n");
		smpboot_clear_io_apic();
		return -1;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated, "
				 "forcing use of dummy APIC emulation.\n");
		smpboot_clear_io_apic();
#ifdef CONFIG_X86_32
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			printk(KERN_INFO "activating minimal APIC for "
					 "NMI watchdog use.\n");
			connect_bsp_APIC();
			setup_local_APIC();
			end_local_APIC_setup();
		}
#endif
		return -1;
	}

	return 0;
}
static void __init smp_cpu_index_default(void)
{
	int i;
	struct cpuinfo_x86 *c;

	for_each_cpu_mask(i, cpu_possible_map) {
		c = &cpu_data(i);
		/* mark all to hotplug */
		c->cpu_index = NR_CPUS;
	}
}
/*
 * Prepare for SMP bootup. The MP table or ACPI has been read
 * earlier. Just do some sanity checking here and enable APIC mode.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	nmi_watchdog_default();
	smp_cpu_index_default();
	current_cpu_data = boot_cpu_data;
	cpu_callin_map = cpumask_of_cpu(0);
	mb();
	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	boot_cpu_logical_apicid = logical_smp_processor_id();
	current_thread_info()->cpu = 0;  /* needed? */
	set_cpu_sibling_map(0);

	if (smp_sanity_check(max_cpus) < 0) {
		printk(KERN_INFO "SMP disabled\n");
		disable_smp();
		return;
	}

	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		     GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_physical_apicid);
		/* Or can we switch back to PIC here? */
	}

#ifdef CONFIG_X86_32
	connect_bsp_APIC();
#endif
	/*
	 * Switch from PIC to APIC mode.
	 */
	setup_local_APIC();

#ifdef CONFIG_X86_64
	/*
	 * Enable IO APIC before setting up error vector
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		enable_IO_APIC();
#endif
	end_local_APIC_setup();

	map_cpu_to_logical_apicid();

	setup_portio_remap();

	smpboot_setup_io_apic();
	/*
	 * Set up local APIC timer on boot CPU.
	 */

	printk(KERN_INFO "CPU%d: ", 0);
	print_cpu_info(&cpu_data(0));
	setup_boot_clock();
}
/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
#ifdef CONFIG_X86_32
	init_gdt(me);
	switch_to_new_gdt();
#endif
	/* already set me in cpu_online_map in boot_cpu_init() */
	cpu_set(me, cpu_callout_map);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}
void __init native_smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();

	Dprintk("Boot done.\n");

	impress_friends();
	smp_checks();
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	check_nmi_watchdog();
#ifdef CONFIG_X86_32
	zap_low_mappings();
#endif
}
#ifdef CONFIG_HOTPLUG_CPU

# ifdef CONFIG_X86_32
void cpu_exit_clear(void)
{
	int cpu = raw_smp_processor_id();

	idle_task_exit();

	cpu_uninit();
	irq_ctx_exit(cpu);

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);

	unmap_cpu_to_logical_apicid(cpu);
}
# endif /* CONFIG_X86_32 */
void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}
int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);
/*
 * cpu_possible_map should be static: it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data-structures
 * are allocated by some modules at init time, and they don't expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * In case cpu_hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with additional_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
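/*
 * Example: a box whose BIOS reports 4 enabled and 4 disabled processors
 * ends up with 8 possible CPUs; booting with additional_cpus=2 instead
 * would give 4 + 2 = 6. (Illustrative numbers, not from the original
 * source.)
 */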
static void __ref remove_cpu_from_maps(int cpu)
{
	cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_X86_64
	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);

	/* was set by cpu_init() */
	clear_bit(cpu, (unsigned long *)&cpu_initialized);
	clear_node_cpumask(cpu);
#endif
}
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();

	/*
	 * Allow any queued timer interrupts to get serviced
	 * This is only a temporary solution until we cleanup
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	remove_cpu_from_maps(cpu);
	fixup_irqs(cpu_online_map);
	return 0;
}
void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk(KERN_INFO "CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * If the BIOS enumerates physical processors before logical,
 * maxcpus=N at enumeration-time can be used to disable HT.
 */
static int __init parse_maxcpus(char *arg)
{
	extern unsigned int maxcpus;

	maxcpus = simple_strtoul(arg, NULL, 0);
	return 0;
}
early_param("maxcpus", parse_maxcpus);
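/*
 * Example: on a dual-core system with HT where the BIOS lists the two
 * physical cores before their HT siblings, booting with maxcpus=2 brings
 * up only the physical cores, effectively disabling HT. (Illustrative
 * scenario based on the comment above, not from the original source.)
 */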