x86: merge cpu_exit_clear
arch/x86/kernel/smpboot.c
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/vmi.h>
#include <linux/mc146818rtc.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/* Store all idle threads, this can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality
 * for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
#else
struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
#endif
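/*
 * Illustrative note (not in the original source): these accessors let
 * the idle task survive CPU hotplug. do_boot_cpu() below first tries
 * get_idle_for_cpu(); only if that returns NULL is a fresh idle task
 * forked and remembered with set_idle_for_cpu() for the next bringup.
 */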
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

static atomic_t init_deasserted;

static int boot_cpu_logical_apicid;

/* ready for x86_64, no harm for x86, since it will overwrite after alloc */
unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* Set if we find a B stepping CPU */
int __cpuinitdata smp_b_stepping;
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)

/* which logical CPUs are on which nodes */
cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
EXPORT_SYMBOL(node_to_cpumask_map);
/* which node each logical CPU is on */
int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_to_node_map);

/* set up a mapping between cpu and node. */
static void map_cpu_to_node(int cpu, int node)
{
	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
	cpu_set(cpu, node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = node;
}

/* undo a mapping between cpu and node. */
static void unmap_cpu_to_node(int cpu)
{
	int node;

	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node++)
		cpu_clear(cpu, node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = 0;
}
#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
#define map_cpu_to_node(cpu, node)	({})
#define unmap_cpu_to_node(cpu)	({})
#endif
#ifdef CONFIG_X86_32
u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
					{ [0 ... NR_CPUS-1] = BAD_APICID };

void map_cpu_to_logical_apicid(void)
{
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();
	int node = apicid_to_node(apicid);

	if (!node_online(node))
		node = first_online_node;

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, node);
}

void unmap_cpu_to_logical_apicid(int cpu)
{
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
}
#else
#define unmap_cpu_to_logical_apicid(cpu) do {} while (0)
#define map_cpu_to_logical_apicid() do {} while (0)
#endif
/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
					phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("%s: CPU%d started up but did not get a callout!\n",
		      __func__, cpuid);
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();
	setup_local_APIC();
	end_local_APIC_setup();
	map_cpu_to_logical_apicid();

	/*
	 * Get our bogomips.
	 *
	 * Need to enable IRQs because it can take longer and then
	 * the NMI watchdog might kill us.
	 */
	local_irq_enable();
	calibrate_delay();
	local_irq_disable();
	Dprintk("Stack at about %p\n", &cpuid);

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}
/*
 * Activate a secondary processor.
 */
void __cpuinit start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(), SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
#ifdef CONFIG_VMI
	vmi_bringup();
#endif
	cpu_init();
	preempt_disable();
	smp_callin();

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0();
		enable_8259A_irq(0);
	}

	/* This must be done before setting cpu_online_map */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines the number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	lock_ipi_call_lock();
#ifdef CONFIG_X86_64
	spin_lock(&vector_lock);

	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(smp_processor_id());
	/*
	 * Allow the master to continue.
	 */
	spin_unlock(&vector_lock);
#endif
	cpu_set(smp_processor_id(), cpu_online_map);
	unlock_ipi_call_lock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;

	setup_secondary_clock();

	wmb();
	cpu_idle();
}
#ifdef CONFIG_X86_32
/*
 * Everything has been set up for the secondary
 * CPUs - they just need to reload everything
 * from the task structure
 * This function must not return.
 */
void __devinit initialize_secondary(void)
{
	/*
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the ip.
	 */
	asm volatile(
		"movl %0,%%esp\n\t"
		"jmp *%1"
		:
		:"m" (current->thread.sp), "m" (current->thread.ip));
}
#endif
static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3)
		/*
		 * Remember we have B step Pentia with bugs
		 */
		smp_b_stepping = 1;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

		if (num_possible_cpus() == 1)
			goto valid_k7;

		/* Athlon 660/661 is valid. */
		if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
		    (c->x86_mask == 1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model == 7) && (c->x86_mask == 0))
			goto valid_k7;

		/*
		 * Athlon 662, Duron 671, and Athlon >model 7 have capability
		 * bit. It's worth noting that the A5 stepping (662) of some
		 * Athlon XP's have the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
		 * more.
		 */
		if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
		    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
		     (c->x86_model > 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, not a certified SMP capable AMD system. */
		add_taint(TAINT_UNSAFE_SMP);
	}

valid_k7:
	;
#endif
}
void smp_checks(void)
{
	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable "
				    "with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (num_online_cpus() > 1)
			printk(KERN_INFO "WARNING: This combination of AMD "
				"processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}
}
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */

void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	if (id != 0)
		identify_secondary_cpu(c);
	smp_apply_quirks(c);
}
void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bringup a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For perf, we return last level cache shared map.
	 * And for power savings, we return cpu_core_map
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return per_cpu(cpu_core_map, cpu);
	else
		return c->llc_shared_map;
}
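/*
 * Illustrative note (not in the original source): on a package whose
 * cores all share the last level cache, llc_shared_map and cpu_core_map
 * contain the same CPUs, so both branches return the same mask. The two
 * only differ on parts with private per-core last level caches, where
 * the perf case yields smaller, cache-local sched domains.
 */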
/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

unsigned long __cpuinit setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data,
	       trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}
#ifdef CONFIG_X86_32
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	if (__pa(trampoline_base) >= 0x9F000)
		BUG();
}
#endif
void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for_each_possible_cpu(cpu)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	Dprintk("Before bogocount - setting activated=1.\n");
}
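/*
 * Worked example of the fixed-point math above (illustrative numbers,
 * not in the original source): BogoMIPS = loops_per_jiffy * HZ / 500000,
 * so two CPUs with loops_per_jiffy = 2500000 at HZ = 250 give
 * bogosum = 5000000 and bogosum / (500000/250) = 2500 whole BogoMIPS;
 * the second expression recovers the two fractional digits at 1/100
 * resolution in the same scale.
 */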
static inline void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			printk(KERN_CONT
			       "a previous APIC delivery may have failed\n");

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk(KERN_CONT "%08x\n", status);
			break;
		default:
			printk(KERN_CONT "failed\n");
		}
	}
}
#ifdef WAKE_SECONDARY_VIA_NMI
/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
static int __devinit
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

	/* Boot on the stack */
	/* Kick the second */
	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	/*
	 * Due to the Pentium erratum 3AP.
	 */
	maxlvt = lapic_get_maxlvt();
	if (maxlvt > 3) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
	}
	accept_status = (apic_read(APIC_ESR) & 0xEF);
	Dprintk("NMI sent.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_NMI */
#ifdef WAKE_SECONDARY_VIA_INIT
static int __devinit
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt, num_starts, j;

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();
	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Paravirt / VMI wants a startup IPI hook here to set up the
	 * target processor state.
	 */
	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
#ifdef CONFIG_X86_64
			 (unsigned long)init_rsp);
#else
			 (unsigned long)stack_start.sp);
#endif

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = lapic_get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_eip >> 12));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_INIT */
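/*
 * Illustrative note (not in the original source): the STARTUP IPI above
 * carries (start_eip >> 12) in its vector field, i.e. the 4K physical
 * page number of the trampoline. The woken AP begins real-mode
 * execution at CS = vector << 8, IP = 0, which is why start_eip must be
 * page-aligned and located below 1MB.
 */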
struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}
static int __cpuinit do_boot_cpu(int apicid, int cpu)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
{
	unsigned long boot_error = 0;
	int timeout;
	unsigned long start_ip;
	unsigned short nmi_high = 0, nmi_low = 0;
	struct create_idle c_idle = {
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};
	INIT_WORK(&c_idle.work, do_fork_idle);
#ifdef CONFIG_X86_64
	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
	if (!cpu_gdt_descr[cpu].address &&
		!(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
		return -1;
	}

	/* Allocate node local memory for AP pdas */
	if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
		struct x8664_pda *newpda, *pda;
		int node = cpu_to_node(cpu);
		pda = cpu_pda(cpu);
		newpda = kmalloc_node(sizeof(struct x8664_pda), GFP_ATOMIC,
				      node);
		if (newpda) {
			memcpy(newpda, pda, sizeof(struct x8664_pda));
			cpu_pda(cpu) = newpda;
		} else
			printk(KERN_ERR
		"Could not allocate node local PDA for CPU %d on node %d\n",
				cpu, node);
	}
#endif

	alternatives_smp_switch(1);

	c_idle.idle = get_idle_for_cpu(cpu);

	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	if (c_idle.idle) {
		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle)) {
		printk(KERN_ERR "failed fork for CPU %d\n", cpu);
		return PTR_ERR(c_idle.idle);
	}

	set_idle_for_cpu(cpu, c_idle.idle);
do_rest:
#ifdef CONFIG_X86_32
	per_cpu(current_task, cpu) = c_idle.idle;
	init_gdt(cpu);
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	c_idle.idle->thread.ip = (unsigned long) start_secondary;
	/* Stack for startup_32 can be just as for start_secondary onwards */
	stack_start.sp = (void *) c_idle.idle->thread.sp;
	irq_ctx_init(cpu);
#else
	cpu_pda(cpu)->pcurrent = c_idle.idle;
	init_rsp = c_idle.idle->thread.sp;
	load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread);
	initial_code = (unsigned long)start_secondary;
	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
#endif

	/* start_ip had better be page-aligned! */
	start_ip = setup_trampoline();

	/* So we see what's up */
	printk(KERN_INFO "Booting processor %d/%d ip %lx\n",
			  cpu, apicid, start_ip);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	store_NMI_vector(&nmi_high, &nmi_low);

	smpboot_setup_warm_reset_vector(start_ip);
	/*
	 * Be paranoid about clearing APIC errors.
	 */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_cpu(apicid, start_ip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			printk(KERN_INFO "CPU%d: ", cpu);
			print_cpu_info(&cpu_data(cpu));
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				printk(KERN_ERR "Stuck ??\n");
			else
				/* trampoline code not run */
				printk(KERN_ERR "Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}

	if (boot_error) {
		/* Try to put things back the way they were before ... */
		unmap_cpu_to_logical_apicid(cpu);
#ifdef CONFIG_X86_64
		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
#endif
		cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */
		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
		cpu_clear(cpu, cpu_possible_map);
		cpu_clear(cpu, cpu_present_map);
		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	return boot_error;
}
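/*
 * Summary sketch of the BSP-side handshake in do_boot_cpu() above
 * (illustrative, not in the original source):
 *
 *	setup_trampoline();			copy the real-mode stub
 *	wakeup_secondary_cpu(apicid, start_ip);	INIT / INIT / STARTUP
 *	cpu_set(cpu, cpu_callout_map);		release the AP
 *	poll cpu_callin_map			AP acks from smp_callin()
 */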
int __cpuinit native_cpu_up(unsigned int cpu)
{
	int apicid = cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpu_isset(cpu, cpu_callin_map)) {
		Dprintk("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

#ifdef CONFIG_X86_32
	/* init low mem mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
	flush_tlb_all();
#endif

	err = do_boot_cpu(apicid, cpu);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_isset(cpu, cpu_online_map)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

	return 0;
}
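/*
 * Caller-side sketch (illustrative, not in the original source): on this
 * tree native_cpu_up() is reached through the smp_ops hook, roughly
 *
 *	err = smp_ops.cpu_up(cpu);	from the generic CPU-hotplug path
 *
 * with cpu taken from cpu_present_map and interrupts enabled, as the
 * WARN_ON(irqs_disabled()) above expects.
 */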
/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	cpu_present_map = cpumask_of_cpu(0);
	cpu_possible_map = cpumask_of_cpu(0);
#ifdef CONFIG_X86_32
	smpboot_clear_io_apic_irqs();
#endif
	if (smp_found_config)
		phys_cpu_present_map =
				physid_mask_of_physid(boot_cpu_physical_apicid);
	else
		phys_cpu_present_map = physid_mask_of_physid(0);
	map_cpu_to_logical_apicid();
	cpu_set(0, per_cpu(cpu_sibling_map, 0));
	cpu_set(0, per_cpu(cpu_core_map, 0));
}
/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk(KERN_WARNING "weird, boot CPU (#%d) not listed "
				    "by the BIOS.\n", hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		disable_smp();
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk(KERN_NOTICE
			"weird, boot CPU (#%d) not listed by the BIOS.\n",
			boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
	    !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. "
				"(tell your hw vendor)\n");
		smpboot_clear_io_apic();
		return -1;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated, "
				 "forcing use of dummy APIC emulation.\n");
		smpboot_clear_io_apic();
#ifdef CONFIG_X86_32
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			printk(KERN_INFO "activating minimal APIC for "
					 "NMI watchdog use.\n");
			connect_bsp_APIC();
			setup_local_APIC();
			end_local_APIC_setup();
		}
#endif
		return -1;
	}

	return 0;
}
static void __init smp_cpu_index_default(void)
{
	int i;
	struct cpuinfo_x86 *c;

	for_each_cpu_mask(i, cpu_possible_map) {
		c = &cpu_data(i);
		/* mark all to hotplug */
		c->cpu_index = NR_CPUS;
	}
}
/*
 * Prepare for SMP bootup. The MP table or ACPI has been read
 * earlier. Just do some sanity checking here and enable APIC mode.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	nmi_watchdog_default();
	smp_cpu_index_default();
	current_cpu_data = boot_cpu_data;
	cpu_callin_map = cpumask_of_cpu(0);
	mb();
	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	boot_cpu_logical_apicid = logical_smp_processor_id();
	current_thread_info()->cpu = 0;  /* needed? */
	set_cpu_sibling_map(0);

	if (smp_sanity_check(max_cpus) < 0) {
		printk(KERN_INFO "SMP disabled\n");
		disable_smp();
		return;
	}

	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		     GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_physical_apicid);
		/* Or can we switch back to PIC here? */
	}

#ifdef CONFIG_X86_32
	connect_bsp_APIC();
#endif
	/*
	 * Switch from PIC to APIC mode.
	 */
	setup_local_APIC();

#ifdef CONFIG_X86_64
	/*
	 * Enable IO APIC before setting up error vector
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		enable_IO_APIC();
#endif
	end_local_APIC_setup();

	map_cpu_to_logical_apicid();

	setup_portio_remap();

	smpboot_setup_io_apic();
	/*
	 * Set up local APIC timer on boot CPU.
	 */

	printk(KERN_INFO "CPU%d: ", 0);
	print_cpu_info(&cpu_data(0));
	setup_boot_clock();
}
/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
#ifdef CONFIG_X86_32
	init_gdt(me);
	switch_to_new_gdt();
#endif
	/* already set me in cpu_online_map in boot_cpu_init() */
	cpu_set(me, cpu_callout_map);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}
void __init native_smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();

	Dprintk("Boot done.\n");

	impress_friends();
	smp_checks();
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	check_nmi_watchdog();
#ifdef CONFIG_X86_32
	zap_low_mappings();
#endif
}
#ifdef CONFIG_HOTPLUG_CPU

# ifdef CONFIG_X86_32
void cpu_exit_clear(void)
{
	int cpu = raw_smp_processor_id();

	idle_task_exit();

	cpu_uninit();
	irq_ctx_exit(cpu);

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);

	unmap_cpu_to_logical_apicid(cpu);
}
# endif /* CONFIG_X86_32 */
void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}
int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);
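/*
 * Usage note (illustrative, not in the original source): booting with
 * "additional_cpus=2" on the kernel command line reserves two possible
 * CPU slots beyond those enumerated at boot, overriding the
 * disabled_cpus heuristic used by prefill_possible_map() below.
 */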
/*
 * cpu_possible_map should be static, it cannot change as CPUs
 * are onlined, or offlined. The reason is per-cpu data-structures
 * are allocated by some modules at init time, and they don't expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_map on the other hand can change dynamically.
 * If cpu_hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with additional_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
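/*
 * Worked example (illustrative numbers, not in the original source):
 * with num_processors = 2 found in the MP table, disabled_cpus = 2 and
 * no additional_cpus= override, possible becomes 4 and the boot log
 * reads "SMP: Allowing 4 CPUs, 2 hotplug CPUs" (assuming NR_CPUS >= 4).
 */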
static void __ref remove_cpu_from_maps(int cpu)
{
	cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_X86_64
	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	/* was set by cpu_init() */
	clear_bit(cpu, (unsigned long *)&cpu_initialized);
	clear_node_cpumask(cpu);
#endif
}
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();

	/*
	 * HACK:
	 * Allow any queued timer interrupts to get serviced
	 * This is only a temporary solution until we cleanup
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	remove_cpu_from_maps(cpu);
	fixup_irqs(cpu_online_map);
	return 0;
}
void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk(KERN_INFO "CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif
/*
 * If the BIOS enumerates physical processors before logical,
 * maxcpus=N at enumeration-time can be used to disable HT.
 */
static int __init parse_maxcpus(char *arg)
{
	extern unsigned int maxcpus;

	maxcpus = simple_strtoul(arg, NULL, 0);
	return 0;
}
early_param("maxcpus", parse_maxcpus);
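/*
 * Usage note (illustrative, not in the original source): on a BIOS that
 * lists all physical cores before their HT siblings, booting a
 * 2-core/4-thread machine with "maxcpus=2" brings up one thread per
 * core, which effectively disables HyperThreading as described above.
 */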