arch/i386/kernel/smpboot.c
/*
 *      x86 SMP booting functions
 *
 *      (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *      (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *      Much of the core SMP work is based on previous work by Thomas Radke, to
 *      whom a great many thanks are extended.
 *
 *      Thanks to Intel for making available several different Pentium,
 *      Pentium Pro and Pentium-II/Xeon MP machines.
 *      Original development of Linux SMP code supported by Caldera.
 *
 *      This code is released under the GNU General Public License version 2 or
 *      later.
 *
 *      Fixes
 *              Felix Koop      :       NR_CPUS used properly
 *              Jose Renau      :       Handle single CPU case.
 *              Alan Cox        :       By repeated request 8) - Total BogoMIPS report.
 *              Greg Wright     :       Fix for kernel stacks panic.
 *              Erich Boleyn    :       MP v1.4 and additional changes.
 *      Matthias Sattler        :       Changes for 2.1 kernel map.
 *      Michel Lespinasse       :       Changes for 2.1 kernel map.
 *      Michael Chastain        :       Change trampoline.S to gnu as.
 *              Alan Cox        :       Dumb bug: 'B' step PPro's are fine
 *              Ingo Molnar     :       Added APIC timers, based on code
 *                                      from Jose Renau
 *              Ingo Molnar     :       various cleanups and rewrites
 *              Tigran Aivazian :       fixed "0.00 in /proc/uptime on SMP" bug.
 *      Maciej W. Rozycki       :       Bits for genuine 82489DX APICs
 *              Martin J. Bligh :       Added support for multi-quad systems
 *              Dave Jones      :       Report invalid combinations of Athlon CPUs.
 *              Rusty Russell   :       Hacked into shape for new "hotplug" boot process.
 */
#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/irq.h>
#include <linux/bootmem.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/arch_hooks.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
/* Set if we find a B stepping CPU */
static int __initdata smp_b_stepping;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */

/* bitmap of online cpus */
cpumask_t cpu_online_map;

static cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
static cpumask_t smp_commenced_mask;

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;

u8 x86_cpu_to_apicid[NR_CPUS] =
                        { [0 ... NR_CPUS-1] = 0xff };
EXPORT_SYMBOL(x86_cpu_to_apicid);

/* Set when the idlers are all forked */
int smp_threads_ready;
/*
 * Trampoline 80x86 program as an array.
 */

extern unsigned char trampoline_data [];
extern unsigned char trampoline_end  [];
static unsigned char *trampoline_base;
static int trampoline_exec;

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
static unsigned long __init setup_trampoline(void)
{
        memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
        return virt_to_phys(trampoline_base);
}
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
        trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
        /*
         * Has to be in very low memory so we can execute
         * real-mode AP code.
         */
        if (__pa(trampoline_base) >= 0x9F000)
                BUG();
        /*
         * Make the SMP trampoline executable:
         */
        trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
}
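/*
 * Illustrative sketch, not part of the original file: the physical address
 * returned by setup_trampoline() is consumed twice below -- as the warm
 * reset vector and as the STARTUP IPI payload, whose 8-bit vector field is
 * simply the physical 4K page number of the trampoline.  That is why the
 * page must sit in real-mode-addressable low memory.
 */
static inline unsigned char trampoline_startup_vector(unsigned long start_eip)
{
        /* e.g. a trampoline page at physical 0x9000 gives vector 0x09 */
        return (unsigned char)(start_eip >> 12);
}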
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */

static void __init smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = cpu_data + id;

        *c = boot_cpu_data;
        if (id != 0)
                identify_cpu(c);
        /*
         * Mask B, Pentium, but not Pentium MMX
         */
        if (c->x86_vendor == X86_VENDOR_INTEL &&
            c->x86 == 5 &&
            c->x86_mask >= 1 && c->x86_mask <= 4 &&
            c->x86_model <= 3)
                /*
                 * Remember we have B step Pentia with bugs
                 */
                smp_b_stepping = 1;

        /*
         * Certain Athlons might work (for various values of 'work') in SMP
         * but they are not certified as MP capable.
         */
        if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

                /* Athlon 660/661 is valid. */
                if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
                        goto valid_k7;

                /* Duron 670 is valid */
                if ((c->x86_model==7) && (c->x86_mask==0))
                        goto valid_k7;

                /*
                 * Athlon 662, Duron 671, and Athlon >model 7 have capability bit.
                 * It's worth noting that the A5 stepping (662) of some Athlon XP's
                 * have the MP bit set.
                 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
                 */
                if (((c->x86_model==6) && (c->x86_mask>=2)) ||
                    ((c->x86_model==7) && (c->x86_mask>=1)) ||
                     (c->x86_model> 7))
                        if (cpu_has_mp)
                                goto valid_k7;

                /* If we get here, it's not a certified SMP capable AMD system. */
                tainted |= TAINT_UNSAFE_SMP;
        }

valid_k7:
        ;
}
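/*
 * Worked example (illustrative, not from the original file): the
 * "Athlon 660/661" notation above packs family/model/stepping the way
 * CPUID leaf 1 does.  A signature of 0x661 decodes as
 *
 *      x86       = (0x661 >> 8) & 0xf = 6      (family)
 *      x86_model = (0x661 >> 4) & 0xf = 6      (model)
 *      x86_mask  =  0x661       & 0xf = 1      (stepping)
 *
 * so it matches the first "valid_k7" test above.
 */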
/*
 * TSC synchronization.
 *
 * We first check whether all CPUs have their TSC's synchronized,
 * then we print a warning if not, and always resync.
 */

static atomic_t tsc_start_flag = ATOMIC_INIT(0);
static atomic_t tsc_count_start = ATOMIC_INIT(0);
static atomic_t tsc_count_stop = ATOMIC_INIT(0);
static unsigned long long tsc_values[NR_CPUS];

#define NR_LOOPS 5
static void __init synchronize_tsc_bp (void)
{
        int i;
        unsigned long long t0;
        unsigned long long sum, avg;
        long long delta;
        unsigned long one_usec;
        int buggy = 0;

        printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());

        /* convert from kcyc/sec to cyc/usec */
        one_usec = cpu_khz / 1000;

        atomic_set(&tsc_start_flag, 1);
        wmb();

        /*
         * We loop a few times to get a primed instruction cache,
         * then the last pass is more or less synchronized and
         * the BP and APs set their cycle counters to zero all at
         * once. This reduces the chance of having random offsets
         * between the processors, and guarantees that the maximum
         * delay between the cycle counters is never bigger than
         * the latency of information-passing (cachelines) between
         * two CPUs.
         */
        for (i = 0; i < NR_LOOPS; i++) {
                /*
                 * all APs synchronize but they loop on '== num_cpus'
                 */
                while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
                        mb();
                atomic_set(&tsc_count_stop, 0);
                wmb();
                /*
                 * this lets the APs save their current TSC:
                 */
                atomic_inc(&tsc_count_start);

                rdtscll(tsc_values[smp_processor_id()]);
                /*
                 * We clear the TSC in the last loop:
                 */
                if (i == NR_LOOPS-1)
                        write_tsc(0, 0);

                /*
                 * Wait for all APs to leave the synchronization point:
                 */
                while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
                        mb();
                atomic_set(&tsc_count_start, 0);
                wmb();
                atomic_inc(&tsc_count_stop);
        }

        sum = 0;
        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_isset(i, cpu_callout_map)) {
                        t0 = tsc_values[i];
                        sum += t0;
                }
        }
        avg = sum;
        do_div(avg, num_booting_cpus());

        sum = 0;
        for (i = 0; i < NR_CPUS; i++) {
                if (!cpu_isset(i, cpu_callout_map))
                        continue;
                delta = tsc_values[i] - avg;
                if (delta < 0)
                        delta = -delta;
                /*
                 * We report bigger than 2 microseconds clock differences.
                 */
                if (delta > 2*one_usec) {
                        long realdelta;

                        if (!buggy) {
                                buggy = 1;
                                printk("\n");
                        }
                        realdelta = delta;
                        do_div(realdelta, one_usec);
                        if (tsc_values[i] < avg)
                                realdelta = -realdelta;

                        printk(KERN_INFO "CPU#%d had %ld usecs TSC skew, fixed it up.\n", i, realdelta);
                }

                sum += delta;
        }
        if (!buggy)
                printk("passed.\n");
}
static void __init synchronize_tsc_ap (void)
{
        int i;

        /*
         * Not every cpu is online at the time
         * this gets called, so we first wait for the BP to
         * finish SMP initialization:
         */
        while (!atomic_read(&tsc_start_flag)) mb();

        for (i = 0; i < NR_LOOPS; i++) {
                atomic_inc(&tsc_count_start);
                while (atomic_read(&tsc_count_start) != num_booting_cpus())
                        mb();

                rdtscll(tsc_values[smp_processor_id()]);
                if (i == NR_LOOPS-1)
                        write_tsc(0, 0);

                atomic_inc(&tsc_count_stop);
                while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
        }
}
#undef NR_LOOPS
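/*
 * The tsc_count_start/tsc_count_stop pair above acts as a two-phase
 * barrier: the BP clears one counter while every CPU is still held at the
 * other, which is what makes the same pair reusable across all the loop
 * passes.  A one-shot sketch of the underlying idea (hypothetical helper,
 * not in the original file):
 */
static inline void tsc_rendezvous_sketch(atomic_t *count, int ncpus)
{
        atomic_inc(count);                      /* signal our arrival */
        while (atomic_read(count) != ncpus)
                mb();                           /* spin until every CPU arrives */
        /* all CPUs now proceed within one cacheline-transfer latency */
}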
extern void calibrate_delay(void);

static atomic_t init_deasserted;

void __init smp_callin(void)
{
        int cpuid, phys_id;
        unsigned long timeout;

        /*
         * If woken up by an INIT in an 82489DX configuration
         * we may get here before an INIT-deassert IPI reaches
         * our local APIC. We have to wait for the IPI or we'll
         * lock up on an APIC access.
         */
        wait_for_init_deassert(&init_deasserted);

        /*
         * (This works even if the APIC is not enabled.)
         */
        phys_id = GET_APIC_ID(apic_read(APIC_ID));
        cpuid = smp_processor_id();
        if (cpu_isset(cpuid, cpu_callin_map)) {
                printk("huh, phys CPU#%d, CPU#%d already present??\n",
                        phys_id, cpuid);
                BUG();
        }
        Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

        /*
         * STARTUP IPIs are fragile beasts as they might sometimes
         * trigger some glue motherboard logic. Complete APIC bus
         * silence for 1 second, this overestimates the time the
         * boot CPU is spending to send the up to 2 STARTUP IPIs
         * by a factor of two. This should be enough.
         */

        /*
         * Waiting 2s total for startup (udelay is not yet working)
         */
        timeout = jiffies + 2*HZ;
        while (time_before(jiffies, timeout)) {
                /*
                 * Has the boot CPU finished its STARTUP sequence?
                 */
                if (cpu_isset(cpuid, cpu_callout_map))
                        break;
                rep_nop();
        }

        if (!time_before(jiffies, timeout)) {
                printk("BUG: CPU%d started up but did not get a callout!\n",
                        cpuid);
                BUG();
        }

        /*
         * the boot CPU has finished the init stage and is spinning
         * on callin_map until we finish. We are free to set up this
         * CPU, first the APIC. (this is probably redundant on most
         * boards)
         */

        Dprintk("CALLIN, before setup_local_APIC().\n");
        smp_callin_clear_local_apic();
        setup_local_APIC();
        map_cpu_to_logical_apicid();

        local_irq_enable();

        /*
         * Get our bogomips.
         */
        calibrate_delay();
        Dprintk("Stack at about %p\n", &cpuid);

        /*
         * Save our processor parameters
         */
        smp_store_cpu_info(cpuid);

        disable_APIC_timer();
        local_irq_disable();
        /*
         * Allow the master to continue.
         */
        cpu_set(cpuid, cpu_callin_map);

        /*
         * Synchronize the TSC with the BP
         */
        if (cpu_has_tsc && cpu_khz)
                synchronize_tsc_ap();
}
int cpucount;

extern int cpu_idle(void);

/*
 * Activate a secondary processor.
 */
int __init start_secondary(void *unused)
{
        /*
         * Don't put anything before smp_callin(); SMP booting
         * is so fragile that we want to limit the things done
         * here to the most necessary things.
         */
        cpu_init();
        smp_callin();
        while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
                rep_nop();
        setup_secondary_APIC_clock();
        if (nmi_watchdog == NMI_IO_APIC) {
                disable_8259A_irq(0);
                enable_NMI_through_LVT0(NULL);
                enable_8259A_irq(0);
        }
        enable_APIC_timer();
        /*
         * low-memory mappings have been cleared, flush them from
         * the local TLBs too.
         */
        local_flush_tlb();
        cpu_set(smp_processor_id(), cpu_online_map);
        wmb();
        return cpu_idle();
}
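/*
 * For orientation (summary added here, condensed from the code in this
 * file): the BP/AP bring-up handshake runs
 *
 *      BP: wakeup_secondary_cpu()         AP: starts in trampoline, enters
 *                                             start_secondary()/smp_callin()
 *      BP: cpu_set(.., cpu_callout_map)   AP: sees callout, calibrates,
 *                                             sets cpu_callin_map
 *      BP: sees callin in do_boot_cpu()   AP: spins on smp_commenced_mask
 *      BP: __cpu_up() sets commenced      AP: sets cpu_online_map, idles
 */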
/*
 * Everything has been set up for the secondary
 * CPUs - they just need to reload everything
 * from the task structure.
 * This function must not return.
 */
void __init initialize_secondary(void)
{
        /*
         * We don't actually need to load the full TSS,
         * basically just the stack pointer and the eip.
         */

        asm volatile(
                "movl %0,%%esp\n\t"
                "jmp *%1"
                :
                :"r" (current->thread.esp), "r" (current->thread.eip));
}

extern struct {
        void * esp;
        unsigned short ss;
} stack_start;
#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
cpumask_t node_2_cpu_mask[MAX_NUMNODES] =
                                { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
/* which node each logical CPU is on */
int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_2_node);

/* set up a mapping between cpu and node. */
static inline void map_cpu_to_node(int cpu, int node)
{
        printk("Mapping cpu %d to node %d\n", cpu, node);
        cpu_set(cpu, node_2_cpu_mask[node]);
        cpu_2_node[cpu] = node;
}

/* undo a mapping between cpu and node. */
static inline void unmap_cpu_to_node(int cpu)
{
        int node;

        printk("Unmapping cpu %d from all nodes\n", cpu);
        for (node = 0; node < MAX_NUMNODES; node++)
                cpu_clear(cpu, node_2_cpu_mask[node]);
        cpu_2_node[cpu] = 0;
}
#else /* !CONFIG_NUMA */

#define map_cpu_to_node(cpu, node)      ({})
#define unmap_cpu_to_node(cpu)          ({})

#endif /* CONFIG_NUMA */
u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };

void map_cpu_to_logical_apicid(void)
{
        int cpu = smp_processor_id();
        int apicid = logical_smp_processor_id();

        cpu_2_logical_apicid[cpu] = apicid;
        map_cpu_to_node(cpu, apicid_to_node(apicid));
}

void unmap_cpu_to_logical_apicid(int cpu)
{
        cpu_2_logical_apicid[cpu] = BAD_APICID;
        unmap_cpu_to_node(cpu);
}
#if APIC_DEBUG
static inline void __inquire_remote_apic(int apicid)
{
        int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
        char *names[] = { "ID", "VERSION", "SPIV" };
        int timeout, status;

        printk("Inquiring remote APIC #%d...\n", apicid);

        for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
                printk("... APIC #%d %s: ", apicid, names[i]);

                /*
                 * Wait for idle.
                 */
                apic_wait_icr_idle();

                apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
                apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

                timeout = 0;
                do {
                        udelay(100);
                        status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
                } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

                switch (status) {
                case APIC_ICR_RR_VALID:
                        status = apic_read(APIC_RRR);
                        printk("%08x\n", status);
                        break;
                default:
                        printk("failed\n");
                }
        }
}
#endif
#ifdef WAKE_SECONDARY_VIA_NMI
/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
static int __init
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
        unsigned long send_status = 0, accept_status = 0;
        int timeout, maxlvt;

        /* Target chip */
        apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

        /* Boot on the stack */
        /* Kick the second */
        apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

        Dprintk("Waiting for send to finish...\n");
        timeout = 0;
        do {
                Dprintk("+");
                udelay(100);
                send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
        } while (send_status && (timeout++ < 1000));

        /*
         * Give the other CPU some time to accept the IPI.
         */
        udelay(200);
        /*
         * Due to the Pentium erratum 3AP.
         */
        maxlvt = get_maxlvt();
        if (maxlvt > 3) {
                apic_read_around(APIC_SPIV);
                apic_write(APIC_ESR, 0);
        }
        accept_status = (apic_read(APIC_ESR) & 0xEF);
        Dprintk("NMI sent.\n");

        if (send_status)
                printk("APIC never delivered???\n");
        if (accept_status)
                printk("APIC delivery error (%lx).\n", accept_status);

        return (send_status | accept_status);
}
#endif  /* WAKE_SECONDARY_VIA_NMI */
#ifdef WAKE_SECONDARY_VIA_INIT
static int __init
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
        unsigned long send_status = 0, accept_status = 0;
        int maxlvt, timeout, num_starts, j;

        /*
         * Be paranoid about clearing APIC errors.
         */
        if (APIC_INTEGRATED(apic_version[phys_apicid])) {
                apic_read_around(APIC_SPIV);
                apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
        }

        Dprintk("Asserting INIT.\n");

        /*
         * Turn INIT on target chip
         */
        apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

        /*
         * Send IPI
         */
        apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
                                | APIC_DM_INIT);

        Dprintk("Waiting for send to finish...\n");
        timeout = 0;
        do {
                Dprintk("+");
                udelay(100);
                send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
        } while (send_status && (timeout++ < 1000));

        mdelay(10);

        Dprintk("Deasserting INIT.\n");

        /* Target chip */
        apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

        /* Send IPI */
        apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

        Dprintk("Waiting for send to finish...\n");
        timeout = 0;
        do {
                Dprintk("+");
                udelay(100);
                send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
        } while (send_status && (timeout++ < 1000));

        atomic_set(&init_deasserted, 1);

        /*
         * Should we send STARTUP IPIs ?
         *
         * Determine this based on the APIC version.
         * If we don't have an integrated APIC, don't send the STARTUP IPIs.
         */
        if (APIC_INTEGRATED(apic_version[phys_apicid]))
                num_starts = 2;
        else
                num_starts = 0;

        /*
         * Run STARTUP IPI loop.
         */
        Dprintk("#startup loops: %d.\n", num_starts);

        maxlvt = get_maxlvt();

        for (j = 1; j <= num_starts; j++) {
                Dprintk("Sending STARTUP #%d.\n", j);
                apic_read_around(APIC_SPIV);
                apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
                Dprintk("After apic_write.\n");

                /*
                 * STARTUP IPI
                 */

                /* Target chip */
                apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

                /* Boot on the stack */
                /* Kick the second */
                apic_write_around(APIC_ICR, APIC_DM_STARTUP
                                        | (start_eip >> 12));

                /*
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(300);

                Dprintk("Startup point 1.\n");

                Dprintk("Waiting for send to finish...\n");
                timeout = 0;
                do {
                        Dprintk("+");
                        udelay(100);
                        send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
                } while (send_status && (timeout++ < 1000));

                /*
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(200);
                /*
                 * Due to the Pentium erratum 3AP.
                 */
                if (maxlvt > 3) {
                        apic_read_around(APIC_SPIV);
                        apic_write(APIC_ESR, 0);
                }
                accept_status = (apic_read(APIC_ESR) & 0xEF);
                if (send_status || accept_status)
                        break;
        }
        Dprintk("After Startup.\n");

        if (send_status)
                printk("APIC never delivered???\n");
        if (accept_status)
                printk("APIC delivery error (%lx).\n", accept_status);

        return (send_status | accept_status);
}
#endif  /* WAKE_SECONDARY_VIA_INIT */
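/*
 * Condensed sequence of the function above, for reference: assert INIT
 * (level-triggered), wait 10ms, deassert INIT, then -- on integrated APICs
 * only -- send up to two STARTUP IPIs whose vector is the trampoline page
 * number (start_eip >> 12), pausing 200-300us for the target to accept and
 * checking the ESR for delivery errors after each step.
 */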
extern cpumask_t cpu_initialized;

static int __init do_boot_cpu(int apicid)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
{
        struct task_struct *idle;
        unsigned long boot_error;
        int timeout, cpu;
        unsigned long start_eip;
        unsigned short nmi_high = 0, nmi_low = 0;

        cpu = ++cpucount;
        /*
         * We can't use kernel_thread since we must avoid
         * rescheduling the child.
         */
        idle = fork_idle(cpu);
        if (IS_ERR(idle))
                panic("failed fork for CPU %d", cpu);
        idle->thread.eip = (unsigned long) start_secondary;
        /* start_eip had better be page-aligned! */
        start_eip = setup_trampoline();

        /* So we see what's up */
        printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
        /* Stack for startup_32 can be just as for start_secondary onwards */
        stack_start.esp = (void *) idle->thread.esp;

        irq_ctx_init(cpu);

        /*
         * This grunge runs the startup process for
         * the targeted processor.
         */

        atomic_set(&init_deasserted, 0);

        Dprintk("Setting warm reset code and vector.\n");

        store_NMI_vector(&nmi_high, &nmi_low);

        smpboot_setup_warm_reset_vector(start_eip);

        /*
         * Starting actual IPI sequence...
         */
        boot_error = wakeup_secondary_cpu(apicid, start_eip);

        if (!boot_error) {
                /*
                 * allow APs to start initializing.
                 */
                Dprintk("Before Callout %d.\n", cpu);
                cpu_set(cpu, cpu_callout_map);
                Dprintk("After Callout %d.\n", cpu);

                /*
                 * Wait 5s total for a response
                 */
                for (timeout = 0; timeout < 50000; timeout++) {
                        if (cpu_isset(cpu, cpu_callin_map))
                                break;  /* It has booted */
                        udelay(100);
                }

                if (cpu_isset(cpu, cpu_callin_map)) {
                        /* number CPUs logically, starting from 1 (BSP is 0) */
                        Dprintk("OK.\n");
                        printk("CPU%d: ", cpu);
                        print_cpu_info(&cpu_data[cpu]);
                        Dprintk("CPU has booted.\n");
                } else {
                        boot_error = 1;
                        if (*((volatile unsigned char *)trampoline_base)
                                        == 0xA5)
                                /* trampoline started but...? */
                                printk("Stuck ??\n");
                        else
                                /* trampoline code not run */
                                printk("Not responding.\n");
                        inquire_remote_apic(apicid);
                }
        }
        x86_cpu_to_apicid[cpu] = apicid;
        if (boot_error) {
                /* Try to put things back the way they were before ... */
                unmap_cpu_to_logical_apicid(cpu);
                cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
                cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
                cpucount--;
        }

        /* mark "stuck" area as not stuck */
        *((volatile unsigned long *)trampoline_base) = 0;

        return boot_error;
}
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

static void smp_tune_scheduling (void)
{
        unsigned long cachesize;       /* kB   */
        unsigned long bandwidth = 350; /* MB/s */
        /*
         * Rough estimation for SMP scheduling, this is the number of
         * cycles it takes for a fully memory-limited process to flush
         * the SMP-local cache.
         *
         * (For a P5 this pretty much means we will choose another idle
         *  CPU almost always at wakeup time (this is due to the small
         *  L1 cache), on PIIs it's around 50-100 usecs, depending on
         *  the cache size)
         */

        if (!cpu_khz) {
                /*
                 * this basically disables processor-affinity
                 * scheduling on SMP without a TSC.
                 */
                cacheflush_time = 0;
                return;
        } else {
                cachesize = boot_cpu_data.x86_cache_size;
                if (cachesize == -1) {
                        cachesize = 16; /* Pentiums, 2x8kB cache */
                        bandwidth = 100;
                }

                cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
        }

        cache_decay_ticks = (long)cacheflush_time/cpu_khz + 1;

        printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
                (long)cacheflush_time/(cpu_khz/1000),
                ((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
        printk("task migration cache decay timeout: %ld msecs.\n",
                cache_decay_ticks);
}
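/*
 * Worked example of the estimate above, with hypothetical figures: a
 * 500 MHz CPU (cpu_khz = 500000) with a 256 kB cache at the assumed
 * 350 MB/s flush bandwidth gives
 *
 *      cacheflush_time = (500000>>10) * (256<<10) / 350 ~= 365503 cycles
 *
 * i.e. roughly 731 usecs, and cache_decay_ticks = 365503/500000 + 1 = 1.
 */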
/*
 * Cycle through the processors sending APIC IPIs to boot each.
 */

static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;

cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
static void __init smp_boot_cpus(unsigned int max_cpus)
{
        int apicid, cpu, bit, kicked;
        unsigned long bogosum = 0;

        /*
         * Setup boot CPU information
         */
        smp_store_cpu_info(0); /* Final full version of the data */
        printk("CPU%d: ", 0);
        print_cpu_info(&cpu_data[0]);

        boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
        boot_cpu_logical_apicid = logical_smp_processor_id();
        x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;

        current_thread_info()->cpu = 0;
        smp_tune_scheduling();
        cpus_clear(cpu_sibling_map[0]);
        cpu_set(0, cpu_sibling_map[0]);
        /*
         * If we couldn't find an SMP configuration at boot time,
         * get out of here now!
         */
        if (!smp_found_config && !acpi_lapic) {
                printk(KERN_NOTICE "SMP motherboard not detected.\n");
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = physid_mask_of_physid(0);
                if (APIC_init_uniprocessor())
                        printk(KERN_NOTICE "Local APIC not detected."
                                           " Using dummy APIC emulation.\n");
                map_cpu_to_logical_apicid();
                return;
        }

        /*
         * Should not be necessary because the MP table should list the boot
         * CPU too, but we do it for the sake of robustness anyway.
         * Makes no sense to do this check in clustered apic mode, so skip it
         */
        if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
                printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
                                boot_cpu_physical_apicid);
                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
        }

        /*
         * If we couldn't find a local APIC, then get out of here now!
         */
        if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
            !cpu_has_apic) {
                printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
                        boot_cpu_physical_apicid);
                printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = physid_mask_of_physid(0);
                return;
        }

        verify_local_APIC();

        /*
         * If SMP should be disabled, then really disable it!
         */
        if (!max_cpus) {
                smp_found_config = 0;
                printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = physid_mask_of_physid(0);
                return;
        }

        connect_bsp_APIC();
        setup_local_APIC();
        map_cpu_to_logical_apicid();
        setup_portio_remap();

        /*
         * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
         *
         * In clustered apic mode, phys_cpu_present_map is constructed thus:
         * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
         * clustered apic ID.
         */
        Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

        kicked = 1;
        for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
                apicid = cpu_present_to_apicid(bit);
                /*
                 * Don't even attempt to start the boot CPU!
                 */
                if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
                        continue;

                if (!check_apicid_present(bit))
                        continue;
                if (max_cpus <= cpucount+1)
                        continue;

                if (do_boot_cpu(apicid))
                        printk("CPU #%d not responding - cannot use it.\n",
                                apicid);
                else
                        ++kicked;
        }
        /*
         * Cleanup possible dangling ends...
         */
        smpboot_restore_warm_reset_vector();

        /*
         * Allow the user to impress friends.
         */
        Dprintk("Before bogomips.\n");
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_isset(cpu, cpu_callout_map))
                        bogosum += cpu_data[cpu].loops_per_jiffy;
        printk(KERN_INFO
                "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                cpucount+1,
                bogosum/(500000/HZ),
                (bogosum/(5000/HZ))%100);

        Dprintk("Before bogocount - setting activated=1.\n");

        if (smp_b_stepping)
                printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");

        /*
         * Don't taint if we are running SMP kernel on a single non-MP
         * approved Athlon
         */
        if (tainted & TAINT_UNSAFE_SMP) {
                if (cpucount)
                        printk(KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
                else
                        tainted &= ~TAINT_UNSAFE_SMP;
        }

        Dprintk("Boot done.\n");
        /*
         * construct cpu_sibling_map[], so that we can tell sibling CPUs
         * efficiently.
         */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                cpus_clear(cpu_sibling_map[cpu]);

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                int siblings = 0;
                int i;
                if (!cpu_isset(cpu, cpu_callout_map))
                        continue;

                if (smp_num_siblings > 1) {
                        for (i = 0; i < NR_CPUS; i++) {
                                if (!cpu_isset(i, cpu_callout_map))
                                        continue;
                                if (phys_proc_id[cpu] == phys_proc_id[i]) {
                                        siblings++;
                                        cpu_set(i, cpu_sibling_map[cpu]);
                                }
                        }
                } else {
                        siblings++;
                        cpu_set(cpu, cpu_sibling_map[cpu]);
                }

                if (siblings != smp_num_siblings)
                        printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
        }
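        /*
         * Illustrative result, not in the original file: on a hypothetical
         * two-package HyperThreading box with smp_num_siblings == 2, CPUs
         * 0,1 share phys_proc_id 0 and CPUs 2,3 share phys_proc_id 1, so
         * the loop above yields
         *
         *      cpu_sibling_map[0] == cpu_sibling_map[1] == { 0, 1 }
         *      cpu_sibling_map[2] == cpu_sibling_map[3] == { 2, 3 }
         */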
        if (nmi_watchdog == NMI_LOCAL_APIC)
                check_nmi_watchdog();

        smpboot_setup_io_apic();

        setup_boot_APIC_clock();

        /*
         * Synchronize the TSC with the AP
         */
        if (cpu_has_tsc && cpucount && cpu_khz)
                synchronize_tsc_bp();
}
/* These are wrappers to interface to the new boot process. Someone
   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        smp_boot_cpus(max_cpus);
}

void __devinit smp_prepare_boot_cpu(void)
{
        cpu_set(smp_processor_id(), cpu_online_map);
        cpu_set(smp_processor_id(), cpu_callout_map);
}
int __devinit __cpu_up(unsigned int cpu)
{
        /* This only works at boot for x86. See "rewrite" above. */
        if (cpu_isset(cpu, smp_commenced_mask)) {
                local_irq_enable();
                return -ENOSYS;
        }

        /* In case one didn't come up */
        if (!cpu_isset(cpu, cpu_callin_map)) {
                local_irq_enable();
                return -EIO;
        }

        local_irq_enable();
        /* Unleash the CPU! */
        cpu_set(cpu, smp_commenced_mask);
        while (!cpu_isset(cpu, cpu_online_map))
                mb();
        return 0;
}
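/*
 * A minimal sketch of the caller side (simplified; cpu_up() lives in
 * kernel/cpu.c, not in this file): the generic hotplug path calls
 * __cpu_up(), and it is the cpu_set() into smp_commenced_mask above that
 * releases the AP spinning in start_secondary().
 *
 *      if (!cpu_online(cpu))
 *              err = __cpu_up(cpu);
 */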
void __init smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
        setup_ioapic_dest();
#endif
        zap_low_mappings();
        /*
         * Disable executability of the SMP trampoline:
         */
        set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
}
void __init smp_intr_init(void)
{
        /*
         * IRQ0 must be given a fixed assignment and initialized,
         * because it's used before the IO-APIC is set up.
         */
        set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);

        /*
         * The reschedule interrupt is a CPU-to-CPU reschedule-helper
         * IPI, driven by wakeup.
         */
        set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

        /* IPI for invalidation */
        set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

        /* IPI for generic function call */
        set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
}