[PATCH] x86, x86_64: dual core proc-cpuinfo and sibling-map fix
arch/x86_64/kernel/smpboot.c
/*
 *      x86 SMP booting functions
 *
 *      (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *      (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *      Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *      Much of the core SMP work is based on previous work by Thomas Radke, to
 *      whom a great many thanks are extended.
 *
 *      Thanks to Intel for making available several different Pentium,
 *      Pentium Pro and Pentium-II/Xeon MP machines.
 *      Original development of Linux SMP code supported by Caldera.
 *
 *      This code is released under the GNU General Public License version 2
 *
 *      Fixes
 *      Felix Koop        :     NR_CPUS used properly
 *      Jose Renau        :     Handle single CPU case.
 *      Alan Cox          :     By repeated request 8) - Total BogoMIP report.
 *      Greg Wright       :     Fix for kernel stacks panic.
 *      Erich Boleyn      :     MP v1.4 and additional changes.
 *      Matthias Sattler  :     Changes for 2.1 kernel map.
 *      Michel Lespinasse :     Changes for 2.1 kernel map.
 *      Michael Chastain  :     Change trampoline.S to gnu as.
 *      Alan Cox          :     Dumb bug: 'B' step PPro's are fine
 *      Ingo Molnar       :     Added APIC timers, based on code
 *                              from Jose Renau
 *      Ingo Molnar       :     various cleanups and rewrites
 *      Tigran Aivazian   :     fixed "0.00 in /proc/uptime on SMP" bug.
 *      Maciej W. Rozycki :     Bits for genuine 82489DX APICs
 *      Andi Kleen        :     Changed for SMP boot into long mode.
 *      Rusty Russell     :     Hacked into shape for new "hotplug" boot process.
 *      Andi Kleen        :     Converted to new state machine.
 *                              Various cleanups.
 *                              Probably mostly hotplug CPU ready now.
 */
#include <linux/config.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/thread_info.h>
#include <linux/module.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/desc.h>
#include <asm/kdebug.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
/* Change for real CPU hotplug. Note other files need to be fixed
   first too. */
#define __cpuinit __init
#define __cpuinitdata __initdata
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
/* Package ID of each logical CPU */
u8 phys_proc_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
u8 cpu_core_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
EXPORT_SYMBOL(phys_proc_id);
EXPORT_SYMBOL(cpu_core_id);

/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

/*
 * Private maps to synchronize booting between AP and BP.
 * Probably not needed anymore, but it makes for easier debugging. -AK
 */
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;

/* Set when the idlers are all forked */
int smp_threads_ready;

cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
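/*
 * cpu_sibling_map[cpu] collects the hyperthreaded siblings sharing a core
 * with 'cpu'; cpu_core_map[cpu] collects the cores sharing its physical
 * package.  Both are filled in by detect_siblings() from cpu_core_id[]
 * and phys_proc_id[].
 */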
/*
 * Trampoline 80x86 program as an array.
 */

extern unsigned char trampoline_data[];
extern unsigned char trampoline_end[];

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
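/*
 * The trampoline has to live in a page of low memory (SMP_TRAMPOLINE_BASE):
 * the AP starts in real mode at the address taken from the warm-reset
 * vector / STARTUP IPI, so only code below 1MB is reachable at that point.
 */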
static unsigned long __cpuinit setup_trampoline(void)
{
        void *tramp = __va(SMP_TRAMPOLINE_BASE);
        memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
        return virt_to_phys(tramp);
}
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */

static void __cpuinit smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = cpu_data + id;

        *c = boot_cpu_data;
        identify_cpu(c);
}
/*
 * Synchronize TSCs of CPUs
 *
 * This new algorithm is less accurate than the old "zero TSCs"
 * one, but we cannot zero TSCs anymore in the new hotplug CPU
 * model.
 */

static atomic_t __cpuinitdata tsc_flag;
static __cpuinitdata DEFINE_SPINLOCK(tsc_sync_lock);
static unsigned long long __cpuinitdata bp_tsc, ap_tsc;

#define NR_LOOPS 5
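/*
 * Hand-rolled rendezvous: the BP takes tsc_sync_lock and zeroes tsc_flag
 * (sync_tsc_bp_init), waits for the AP to bump tsc_flag to 1, samples
 * bp_tsc and drops the lock; the AP then copies bp_tsc into its own TSC
 * MSR, records ap_tsc, and bumps tsc_flag to 2, after which the BP reads
 * its TSC again and reports the remaining difference.
 */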
static void __cpuinit sync_tsc_bp_init(int init)
{
        if (init)
                _raw_spin_lock(&tsc_sync_lock);
        else
                _raw_spin_unlock(&tsc_sync_lock);
        atomic_set(&tsc_flag, 0);
}
/*
 * Synchronize TSC on AP with BP.
 */
static void __cpuinit __sync_tsc_ap(void)
{
        if (!cpu_has_tsc)
                return;
        Dprintk("AP %d syncing TSC\n", smp_processor_id());

        while (atomic_read(&tsc_flag) != 0)
                cpu_relax();
        atomic_inc(&tsc_flag);
        mb();
        _raw_spin_lock(&tsc_sync_lock);
        wrmsrl(MSR_IA32_TSC, bp_tsc);
        _raw_spin_unlock(&tsc_sync_lock);
        rdtscll(ap_tsc);
        mb();
        atomic_inc(&tsc_flag);
        mb();
}
static void __cpuinit sync_tsc_ap(void)
{
        int i;
        for (i = 0; i < NR_LOOPS; i++)
                __sync_tsc_ap();
}
/*
 * Synchronize TSC from BP to AP.
 */
static void __cpuinit __sync_tsc_bp(int cpu)
{
        if (!cpu_has_tsc)
                return;

        /* Wait for AP */
        while (atomic_read(&tsc_flag) == 0)
                cpu_relax();
        /* Save BPs TSC */
        sync_core();
        rdtscll(bp_tsc);
        /* Don't do the sync core here to avoid too much latency. */
        mb();
        /* Start the AP */
        _raw_spin_unlock(&tsc_sync_lock);
        /* Wait for AP again */
        while (atomic_read(&tsc_flag) < 2)
                cpu_relax();
        rdtscl(bp_tsc);
        barrier();
}
static void __cpuinit sync_tsc_bp(int cpu)
{
        int i;
        for (i = 0; i < NR_LOOPS - 1; i++) {
                __sync_tsc_bp(cpu);
                sync_tsc_bp_init(1);
        }
        __sync_tsc_bp(cpu);
        printk(KERN_INFO "Synced TSC of CPU %d difference %Ld\n",
               cpu, ap_tsc - bp_tsc);
}
static atomic_t init_deasserted __cpuinitdata;
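/* Set to 1 by wakeup_secondary_via_INIT() once the INIT IPI has been
   deasserted; smp_callin() on the AP spins on this before touching its
   local APIC. */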
/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
void __cpuinit smp_callin(void)
{
        int cpuid, phys_id;
        unsigned long timeout;

        /*
         * If woken up by an INIT in an 82489DX configuration
         * we may get here before an INIT-deassert IPI reaches
         * our local APIC.  We have to wait for the IPI or we'll
         * lock up on an APIC access.
         */
        while (!atomic_read(&init_deasserted))
                cpu_relax();

        /*
         * (This works even if the APIC is not enabled.)
         */
        phys_id = GET_APIC_ID(apic_read(APIC_ID));
        cpuid = smp_processor_id();
        if (cpu_isset(cpuid, cpu_callin_map)) {
                panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
                      phys_id, cpuid);
        }
        Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

        /*
         * STARTUP IPIs are fragile beasts as they might sometimes
         * trigger some glue motherboard logic. Complete APIC bus
         * silence for 1 second, this overestimates the time the
         * boot CPU is spending to send the up to 2 STARTUP IPIs
         * by a factor of two. This should be enough.
         */

        /*
         * Waiting 2s total for startup (udelay is not yet working)
         */
        timeout = jiffies + 2*HZ;
        while (time_before(jiffies, timeout)) {
                /*
                 * Has the boot CPU finished its STARTUP sequence?
                 */
                if (cpu_isset(cpuid, cpu_callout_map))
                        break;
                cpu_relax();
        }

        if (!time_before(jiffies, timeout)) {
                panic("smp_callin: CPU%d started up but did not get a callout!\n",
                      cpuid);
        }

        /*
         * the boot CPU has finished the init stage and is spinning
         * on callin_map until we finish. We are free to set up this
         * CPU, first the APIC. (this is probably redundant on most
         * boards)
         */

        Dprintk("CALLIN, before setup_local_APIC().\n");
        setup_local_APIC();

        /*
         * Get our bogomips.
         */
        calibrate_delay();
        Dprintk("Stack at about %p\n",&cpuid);

        disable_APIC_timer();

        /*
         * Save our processor parameters
         */
        smp_store_cpu_info(cpuid);

        /*
         * Allow the master to continue.
         */
        cpu_set(cpuid, cpu_callin_map);
}
/*
 * Setup code on secondary processor (after coming out of the trampoline)
 */
void __cpuinit start_secondary(void)
{
        /*
         * Don't put anything before smp_callin(); SMP booting is so
         * fragile that we want to limit the things done here to the
         * most necessary things.
         */
        cpu_init();
        smp_callin();

        /*
         * Synchronize the TSC with the BP
         */
        sync_tsc_ap();

        /* otherwise gcc will move up the smp_processor_id before the cpu_init */
        barrier();

        Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());
        setup_secondary_APIC_clock();

        Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());

        if (nmi_watchdog == NMI_IO_APIC) {
                disable_8259A_irq(0);
                enable_NMI_through_LVT0(NULL);
                enable_8259A_irq(0);
        }

        enable_APIC_timer();

        /*
         * Allow the master to continue.
         */
        cpu_set(smp_processor_id(), cpu_online_map);
        mb();

        cpu_idle();
}
extern volatile unsigned long init_rsp;
extern void (*initial_code)(void);
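/*
 * init_rsp and initial_code are filled in by do_boot_cpu() and consumed by
 * the early 64-bit entry code once the AP leaves the trampoline: the new
 * CPU switches to its idle task's stack and jumps to start_secondary().
 */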
#if APIC_DEBUG
static void inquire_remote_apic(int apicid)
{
        unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
        char *names[] = { "ID", "VERSION", "SPIV" };
        int timeout, status;

        printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

        for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
                printk("... APIC #%d %s: ", apicid, names[i]);

                /*
                 * Wait for idle.
                 */
                apic_wait_icr_idle();

                apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
                apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

                timeout = 0;
                do {
                        udelay(100);
                        status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
                } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

                switch (status) {
                case APIC_ICR_RR_VALID:
                        status = apic_read(APIC_RRR);
                        printk("%08x\n", status);
                        break;
                default:
                        printk("failed\n");
                }
        }
}
#endif
/*
 * Kick the secondary to wake up.
 */
static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
{
        unsigned long send_status = 0, accept_status = 0;
        int maxlvt, timeout, num_starts, j;

        Dprintk("Asserting INIT.\n");

        /*
         * Turn INIT on target chip
         */
        apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

        /*
         * Send IPI
         */
        apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
                          | APIC_DM_INIT);

        Dprintk("Waiting for send to finish...\n");
        timeout = 0;
        do {
                Dprintk("+");
                udelay(100);
                send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
        } while (send_status && (timeout++ < 1000));

        mdelay(10);

        Dprintk("Deasserting INIT.\n");

        /* Target chip */
        apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

        /* Send IPI */
        apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

        Dprintk("Waiting for send to finish...\n");
        timeout = 0;
        do {
                Dprintk("+");
                udelay(100);
                send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
        } while (send_status && (timeout++ < 1000));

        atomic_set(&init_deasserted, 1);

        /*
         * Should we send STARTUP IPIs ?
         *
         * Determine this based on the APIC version.
         * If we don't have an integrated APIC, don't send the STARTUP IPIs.
         */
        if (APIC_INTEGRATED(apic_version[phys_apicid]))
                num_starts = 2;
        else
                num_starts = 0;

        /*
         * Run STARTUP IPI loop.
         */
        Dprintk("#startup loops: %d.\n", num_starts);

        maxlvt = get_maxlvt();

        for (j = 1; j <= num_starts; j++) {
                Dprintk("Sending STARTUP #%d.\n",j);
                apic_read_around(APIC_SPIV);
                apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
                Dprintk("After apic_write.\n");

                /*
                 * STARTUP IPI
                 */

                /* Target chip */
                apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

                /* Boot on the stack */
                /* Kick the second */
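                /* The STARTUP IPI carries the 4K page number of the
                   trampoline (start_rip >> 12) in its vector field, so the
                   AP begins executing in real mode at that page. */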
                apic_write_around(APIC_ICR, APIC_DM_STARTUP
                                  | (start_rip >> 12));

                /*
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(300);

                Dprintk("Startup point 1.\n");

                Dprintk("Waiting for send to finish...\n");
                timeout = 0;
                do {
                        Dprintk("+");
                        udelay(100);
                        send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
                } while (send_status && (timeout++ < 1000));

                /*
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(200);
                /*
                 * Due to the Pentium erratum 3AP.
                 */
                if (maxlvt > 3) {
                        apic_read_around(APIC_SPIV);
                        apic_write(APIC_ESR, 0);
                }
                accept_status = (apic_read(APIC_ESR) & 0xEF);
                if (send_status || accept_status)
                        break;
        }
        Dprintk("After Startup.\n");

        if (send_status)
                printk(KERN_ERR "APIC never delivered???\n");
        if (accept_status)
                printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

        return (send_status | accept_status);
}
/*
 * Boot one CPU.
 */
static int __cpuinit do_boot_cpu(int cpu, int apicid)
{
        struct task_struct *idle;
        unsigned long boot_error;
        int timeout;
        unsigned long start_rip;
        /*
         * We can't use kernel_thread since we must avoid rescheduling
         * the child.
         */
        idle = fork_idle(cpu);
        if (IS_ERR(idle)) {
                printk("failed fork for CPU %d\n", cpu);
                return PTR_ERR(idle);
        }
        x86_cpu_to_apicid[cpu] = apicid;

        cpu_pda[cpu].pcurrent = idle;

        start_rip = setup_trampoline();

        init_rsp = idle->thread.rsp;
        per_cpu(init_tss,cpu).rsp0 = init_rsp;
        initial_code = start_secondary;
        clear_ti_thread_flag(idle->thread_info, TIF_FORK);

        printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid,
               start_rip, init_rsp);

        /*
         * This grunge runs the startup process for
         * the targeted processor.
         */

        atomic_set(&init_deasserted, 0);

        Dprintk("Setting warm reset code and vector.\n");
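        /*
         * CMOS register 0x0F is the shutdown status byte: 0x0A tells the
         * BIOS to jump through the warm-reset vector on the INIT-triggered
         * reset.  That vector lives in the BIOS data area (offset at phys
         * 0x467, segment at phys 0x469) and is pointed at the trampoline.
         */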
        CMOS_WRITE(0xa, 0xf);
        local_flush_tlb();
        Dprintk("1.\n");
        *((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
        Dprintk("2.\n");
        *((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
        Dprintk("3.\n");

        /*
         * Be paranoid about clearing APIC errors.
         */
        if (APIC_INTEGRATED(apic_version[apicid])) {
                apic_read_around(APIC_SPIV);
                apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
        }

        /*
         * Status is now clean
         */
        boot_error = 0;

        /*
         * Starting actual IPI sequence...
         */
        boot_error = wakeup_secondary_via_INIT(apicid, start_rip);

        if (!boot_error) {
                /*
                 * allow APs to start initializing.
                 */
                Dprintk("Before Callout %d.\n", cpu);
                cpu_set(cpu, cpu_callout_map);
                Dprintk("After Callout %d.\n", cpu);

                /*
                 * Wait 5s total for a response
                 */
                for (timeout = 0; timeout < 50000; timeout++) {
                        if (cpu_isset(cpu, cpu_callin_map))
                                break;  /* It has booted */
                        udelay(100);
                }

                if (cpu_isset(cpu, cpu_callin_map)) {
                        /* number CPUs logically, starting from 1 (BSP is 0) */
                        Dprintk("OK.\n");
                        print_cpu_info(&cpu_data[cpu]);
                        Dprintk("CPU has booted.\n");
                } else {
                        boot_error = 1;
                        if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
                                        == 0xA5)
                                /* trampoline started but...? */
                                printk("Stuck ??\n");
                        else
                                /* trampoline code not run */
                                printk("Not responding.\n");
#if APIC_DEBUG
                        inquire_remote_apic(apicid);
#endif
                }
        }
        if (boot_error) {
                cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
                clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
                cpu_clear(cpu, cpu_present_map);
                cpu_clear(cpu, cpu_possible_map);
                x86_cpu_to_apicid[cpu] = BAD_APICID;
                x86_cpu_to_log_apicid[cpu] = BAD_APICID;
                return -EIO;
        }

        return 0;
}
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;
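/*
 * Topology as used below: CPUs reporting the same cpu_core_id are treated
 * as hyperthreaded siblings of one core, while CPUs sharing a phys_proc_id
 * (package ID) are placed in each other's cpu_core_map.
 */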
/*
 * Construct cpu_sibling_map[], so that we can tell the sibling CPU
 * on SMT systems efficiently.
 */
static __cpuinit void detect_siblings(void)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                cpus_clear(cpu_sibling_map[cpu]);
                cpus_clear(cpu_core_map[cpu]);
        }

        for_each_online_cpu (cpu) {
                struct cpuinfo_x86 *c = cpu_data + cpu;
                int siblings = 0;
                int i;
                if (smp_num_siblings > 1) {
                        for_each_online_cpu (i) {
                                if (cpu_core_id[cpu] == cpu_core_id[i]) {
                                        siblings++;
                                        cpu_set(i, cpu_sibling_map[cpu]);
                                }
                        }
                } else {
                        siblings++;
                        cpu_set(cpu, cpu_sibling_map[cpu]);
                }

                if (siblings != smp_num_siblings) {
                        printk(KERN_WARNING
                               "WARNING: %d siblings found for CPU%d, should be %d\n",
                               siblings, cpu, smp_num_siblings);
                        smp_num_siblings = siblings;
                }
                if (c->x86_num_cores > 1) {
                        for_each_online_cpu(i) {
                                if (phys_proc_id[cpu] == phys_proc_id[i])
                                        cpu_set(i, cpu_core_map[cpu]);
                        }
                } else
                        cpu_core_map[cpu] = cpu_sibling_map[cpu];
        }
}
/*
 * Cleanup possible dangling ends...
 */
static __cpuinit void smp_cleanup_boot(void)
{
        /*
         * Paranoid:  Set warm reset code and vector here back
         * to default values.
         */
        CMOS_WRITE(0, 0xf);

        /*
         * Reset trampoline flag
         */
        *((volatile int *) phys_to_virt(0x467)) = 0;

#ifndef CONFIG_HOTPLUG_CPU
        /*
         * Free pages reserved for SMP bootup.
         * When you add hotplug CPU support later remove this
         * Note there is more work to be done for later CPU bootup.
         */

        free_page((unsigned long) __va(PAGE_SIZE));
        free_page((unsigned long) __va(SMP_TRAMPOLINE_BASE));
#endif
}
/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __cpuinit void disable_smp(void)
{
        cpu_present_map = cpumask_of_cpu(0);
        cpu_possible_map = cpumask_of_cpu(0);
        if (smp_found_config)
                phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
        else
                phys_cpu_present_map = physid_mask_of_physid(0);
        cpu_set(0, cpu_sibling_map[0]);
        cpu_set(0, cpu_core_map[0]);
}
/*
 * Handle user cpus=... parameter.
 */
static __cpuinit void enforce_max_cpus(unsigned max_cpus)
{
        int i, k;
        k = 0;
        for (i = 0; i < NR_CPUS; i++) {
                if (!cpu_possible(i))
                        continue;
                if (++k > max_cpus) {
                        cpu_clear(i, cpu_possible_map);
                        cpu_clear(i, cpu_present_map);
                }
        }
}
/*
 * Various sanity checks.
 */
static int __cpuinit smp_sanity_check(unsigned max_cpus)
{
        if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
                printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
                       hard_smp_processor_id());
                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
        }

        /*
         * If we couldn't find an SMP configuration at boot time,
         * get out of here now!
         */
        if (!smp_found_config) {
                printk(KERN_NOTICE "SMP motherboard not detected.\n");
                disable_smp();
                if (APIC_init_uniprocessor())
                        printk(KERN_NOTICE "Local APIC not detected."
                                           " Using dummy APIC emulation.\n");
                return -1;
        }

        /*
         * Should not be necessary because the MP table should list the boot
         * CPU too, but we do it for the sake of robustness anyway.
         */
        if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
                printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
                       boot_cpu_id);
                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
        }

        /*
         * If we couldn't find a local APIC, then get out of here now!
         */
        if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && !cpu_has_apic) {
                printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
                       boot_cpu_id);
                printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
                nr_ioapics = 0;
                return -1;
        }

        /*
         * If SMP should be disabled, then really disable it!
         */
        if (!max_cpus) {
                printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
                nr_ioapics = 0;
                return -1;
        }

        return 0;
}
/*
 * Prepare for SMP bootup. The MP table or ACPI has been read
 * earlier. Just do some sanity checking here and enable APIC mode.
 */
void __cpuinit smp_prepare_cpus(unsigned int max_cpus)
{
        int i;

        nmi_watchdog_default();
        current_cpu_data = boot_cpu_data;
        current_thread_info()->cpu = 0;  /* needed? */

        enforce_max_cpus(max_cpus);

        /*
         * Fill in cpu_present_mask
         */
        for (i = 0; i < NR_CPUS; i++) {
                int apicid = cpu_present_to_apicid(i);
                if (physid_isset(apicid, phys_cpu_present_map)) {
                        cpu_set(i, cpu_present_map);
                        /* possible map would be different if we supported real
                           CPU hotplug. */
                        cpu_set(i, cpu_possible_map);
                }
        }

        if (smp_sanity_check(max_cpus) < 0) {
                printk(KERN_INFO "SMP disabled\n");
                disable_smp();
                return;
        }

        /*
         * Switch from PIC to APIC mode.
         */
        connect_bsp_APIC();
        setup_local_APIC();

        if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
                panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
                      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
                /* Or can we switch back to PIC here? */
        }
        x86_cpu_to_apicid[0] = boot_cpu_id;

        /*
         * Now start the IO-APICs
         */
        if (!skip_ioapic_setup && nr_ioapics)
                setup_IO_APIC();
        else
                nr_ioapics = 0;

        /*
         * Set up local APIC timer on boot CPU.
         */
        setup_boot_APIC_clock();
}
/*
 * Early setup to make printk work.
 */
void __init smp_prepare_boot_cpu(void)
{
        int me = smp_processor_id();
        cpu_set(me, cpu_online_map);
        cpu_set(me, cpu_callout_map);
}
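/*
 * Bring-up sequence for one AP, as implemented above: __cpu_up() ->
 * do_boot_cpu() forks the idle task and sends the INIT/STARTUP IPIs ->
 * the AP runs the trampoline and start_secondary() -> smp_callin()
 * reports back via cpu_callin_map -> the TSCs are synchronized -> the AP
 * marks itself in cpu_online_map and enters cpu_idle().
 */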
/*
 * Entry point to boot a CPU.
 *
 * This is all __cpuinit, not __devinit for now because we don't support
 * CPU hotplug (yet).
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
        int err;
        int apicid = cpu_present_to_apicid(cpu);

        WARN_ON(irqs_disabled());

        Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

        if (apicid == BAD_APICID || apicid == boot_cpu_id ||
            !physid_isset(apicid, phys_cpu_present_map)) {
                printk("__cpu_up: bad cpu %d\n", cpu);
                return -EINVAL;
        }
        sync_tsc_bp_init(1);

        /* Boot it! */
        err = do_boot_cpu(cpu, apicid);
        if (err < 0) {
                sync_tsc_bp_init(0);
                Dprintk("do_boot_cpu failed %d\n", err);
                return err;
        }

        sync_tsc_bp(cpu);

        /* Unleash the CPU! */
        Dprintk("waiting for cpu %d\n", cpu);

        while (!cpu_isset(cpu, cpu_online_map))
                cpu_relax();
        return 0;
}
/*
 * Finish the SMP boot.
 */
void __cpuinit smp_cpus_done(unsigned int max_cpus)
{
        zap_low_mappings();
        smp_cleanup_boot();

#ifdef CONFIG_X86_IO_APIC
        setup_ioapic_dest();
#endif

        detect_siblings();
        time_init_gtod();
}