/*
 *      x86 SMP booting functions
 *
 *      (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *      (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *      Much of the core SMP work is based on previous work by Thomas Radke, to
 *      whom a great many thanks are extended.
 *
 *      Thanks to Intel for making available several different Pentium,
 *      Pentium Pro and Pentium-II/Xeon MP machines.
 *      Original development of Linux SMP code supported by Caldera.
 *
 *      This code is released under the GNU General Public License version 2 or
 *      later.
 *
 *      Fixes
 *              Felix Koop      :       NR_CPUS used properly
 *              Jose Renau      :       Handle single CPU case.
 *              Alan Cox        :       By repeated request 8) - Total BogoMIP report.
 *              Greg Wright     :       Fix for kernel stacks panic.
 *              Erich Boleyn    :       MP v1.4 and additional changes.
 *              Matthias Sattler:       Changes for 2.1 kernel map.
 *              Michel Lespinasse:      Changes for 2.1 kernel map.
 *              Michael Chastain:       Change trampoline.S to gnu as.
 *              Alan Cox        :       Dumb bug: 'B' step PPro's are fine
 *              Ingo Molnar     :       Added APIC timers, based on code
 *                                      from Jose Renau
 *              Ingo Molnar     :       various cleanups and rewrites
 *              Tigran Aivazian :       fixed "0.00 in /proc/uptime on SMP" bug.
 *              Maciej W. Rozycki:      Bits for genuine 82489DX APICs
 *              Martin J. Bligh :       Added support for multi-quad systems
 *              Dave Jones      :       Report invalid combinations of Athlon CPUs.
 *              Rusty Russell   :       Hacked into shape for new "hotplug" boot process.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/irq.h>
#include <linux/bootmem.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/smpboot.h>

#include <asm/arch_hooks.h>
#include "smpboot_hooks.h"
#include "mach_apic.h"
/* Set if we find a B stepping CPU */
static int __initdata smp_b_stepping;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */

/* Bitmask of currently online CPUs */
unsigned long cpu_online_map;

static volatile unsigned long cpu_callin_map;
volatile unsigned long cpu_callout_map;
static unsigned long smp_commenced_mask;

/* Count of booted secondary CPUs (used by do_boot_cpu() and smp_boot_cpus()) */
static int cpucount;

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;

/* Set when the idlers are all forked */
int smp_threads_ready;
/*
 * Trampoline 80x86 program as an array.
 */

extern unsigned char trampoline_data [];
extern unsigned char trampoline_end  [];
static unsigned char *trampoline_base;

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

static unsigned long __init setup_trampoline(void)
{
        memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
        return virt_to_phys(trampoline_base);
}
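/*
 * Illustration (not part of the boot path): the physical address returned
 * by setup_trampoline() has two consumers later in this file -- the
 * 8086-style warm-reset vector, which wants a real-mode segment:offset
 * pair, and the STARTUP IPI, whose vector field encodes a 4K page number.
 * A minimal sketch of both encodings, assuming a page-aligned start_eip
 * below 1MB:
 */
#if 0
static void example_trampoline_encodings(void)
{
        unsigned long start_eip = setup_trampoline();

        /* warm-reset vector: segment = addr >> 4, offset = addr & 0xf */
        unsigned short seg = start_eip >> 4;
        unsigned short off = start_eip & 0xf;

        /* STARTUP IPI vector field: the trampoline's 4K page number */
        unsigned char sipi_page = start_eip >> 12;
}
#endif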
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
        trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
        /*
         * Has to be in very low memory so we can execute
         * real-mode AP code.
         */
        if (__pa(trampoline_base) >= 0x9F000)
                BUG();
}
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */

static void __init smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = cpu_data + id;

        /*
         * Mask B, Pentium, but not Pentium MMX
         */
        if (c->x86_vendor == X86_VENDOR_INTEL &&
            c->x86 == 5 &&
            c->x86_mask >= 1 && c->x86_mask <= 4 &&
            c->x86_model <= 3)
                /*
                 * Remember we have B step Pentia with bugs
                 */
                smp_b_stepping = 1;

        /*
         * Certain Athlons might work (for various values of 'work') in SMP
         * but they are not certified as MP capable.
         */
        if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

                /* Athlon 660/661 is valid. */
                if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
                        goto valid_k7;

                /* Duron 670 is valid */
                if ((c->x86_model==7) && (c->x86_mask==0))
                        goto valid_k7;

                /*
                 * Athlon 662, Duron 671, and Athlon >model 7 have the MP bit.
                 * It's worth noting that some Athlon XPs (the A5 stepping,
                 * 662) have the MP bit set.
                 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
                 */
                if (((c->x86_model==6) && (c->x86_mask>=2)) ||
                    ((c->x86_model==7) && (c->x86_mask>=1)) ||
                     (c->x86_model> 7))
                        if (cpu_has_mp)
                                goto valid_k7;

                /* If we get here, it's not a certified SMP capable AMD system. */
                printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
                tainted |= TAINT_UNSAFE_SMP;
        }

valid_k7:
        ;
}
/*
 * TSC synchronization.
 *
 * We first check whether all CPUs have their TSCs synchronized,
 * then we print a warning if not, and always resync.
 */

static atomic_t tsc_start_flag = ATOMIC_INIT(0);
static atomic_t tsc_count_start = ATOMIC_INIT(0);
static atomic_t tsc_count_stop = ATOMIC_INIT(0);
static unsigned long long tsc_values[NR_CPUS];

#define NR_LOOPS 5

extern unsigned long fast_gettimeoffset_quotient;

/*
 * accurate 64-bit/32-bit division, expanded to 32-bit divisions and 64-bit
 * multiplication. Not terribly optimized but we need it at boot time only
 * anyway.
 *
 * result == a / b
 *      == (a1 + a2*(2^32)) / b
 *      == a1/b + a2*(2^32/b)
 *      == a1/b + a2*((2^32-1)/b) + a2/b + (a2*((2^32-1) % b))/b
 *                  ^---- (this multiplication can overflow)
 */
static unsigned long long __init div64 (unsigned long long a, unsigned long b0)
{
        unsigned int a1, a2;
        unsigned long long res;

        a1 = ((unsigned int*)&a)[0];
        a2 = ((unsigned int*)&a)[1];
        res = a1/b0 +
                (unsigned long long)a2 * (unsigned long long)(0xffffffff/b0) +
                a2 / b0 +
                (a2 * (0xffffffff % b0)) / b0;

        return res;
}
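/*
 * Worked example (illustration only): div64(10000000000ULL, 1000000)
 * splits a into a1 = 1410065408, a2 = 2 and computes
 *      1410 + 2*4294 + 0 + (2*967295)/1000000 = 1410 + 8588 + 0 + 1 = 9999,
 * one off the exact quotient 10000 because each term truncates. That is
 * fine here: the only callers convert TSC deltas to microseconds for a
 * boot-time printk, where being off by a few counts does not matter.
 */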
static void __init synchronize_tsc_bp (void)
{
        int i;
        unsigned long long t0;
        unsigned long long sum, avg;
        long long delta;
        unsigned long one_usec;
        int buggy = 0;

        printk("checking TSC synchronization across %u CPUs: ", num_booting_cpus());

        one_usec = ((1<<30)/fast_gettimeoffset_quotient)*(1<<2);

        atomic_set(&tsc_start_flag, 1);
        wmb();

        /*
         * We loop a few times to get a primed instruction cache,
         * then the last pass is more or less synchronized and
         * the BP and APs set their cycle counters to zero all at
         * once. This reduces the chance of having random offsets
         * between the processors, and guarantees that the maximum
         * delay between the cycle counters is never bigger than
         * the latency of information-passing (cachelines) between
         * two CPUs.
         */
        for (i = 0; i < NR_LOOPS; i++) {
                /*
                 * all APs synchronize but they loop on '== num_cpus'
                 */
                while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
                        mb();
                atomic_set(&tsc_count_stop, 0);
                wmb();
                /*
                 * this lets the APs save their current TSC:
                 */
                atomic_inc(&tsc_count_start);

                rdtscll(tsc_values[smp_processor_id()]);
                /*
                 * We clear the TSC in the last loop:
                 */
                if (i == NR_LOOPS-1)
                        write_tsc(0, 0);

                /*
                 * Wait for all APs to leave the synchronization point:
                 */
                while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
                        mb();
                atomic_set(&tsc_count_start, 0);
                wmb();
                atomic_inc(&tsc_count_stop);
        }

        sum = 0;
        for (i = 0; i < NR_CPUS; i++) {
                if (test_bit(i, &cpu_callout_map)) {
                        t0 = tsc_values[i];
                        sum += t0;
                }
        }
        avg = div64(sum, num_booting_cpus());

        for (i = 0; i < NR_CPUS; i++) {
                if (!test_bit(i, &cpu_callout_map))
                        continue;

                delta = tsc_values[i] - avg;
                if (delta < 0)
                        delta = -delta;
                /*
                 * We report bigger than 2 microseconds clock differences.
                 */
                if (delta > 2*one_usec) {
                        long realdelta;

                        if (!buggy) {
                                buggy = 1;
                                printk("\n");
                        }
                        realdelta = div64(delta, one_usec);
                        if (tsc_values[i] < avg)
                                realdelta = -realdelta;

                        printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n",
                                i, realdelta);
                }
        }
        if (!buggy)
                printk("passed.\n");
}
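/*
 * Shape of the handshake above (illustration, abridged): with N booting
 * CPUs, each loop pass is a two-phase barrier built from the two counters.
 *
 *      APs: atomic_inc(tsc_count_start), then spin until it reads N
 *      BP:  spin until tsc_count_start == N-1, then inc -> releases APs
 *      all: rdtscll() into tsc_values[]        (roughly simultaneous)
 *      APs: atomic_inc(tsc_count_stop), then spin until it reads N
 *      BP:  spin until tsc_count_stop == N-1, then inc -> releases APs
 *
 * The BP is always the last to bump each counter, so every CPU samples
 * its TSC within one cacheline-transfer latency of the others.
 */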
static void __init synchronize_tsc_ap (void)
{
        int i;

        /*
         * Not every cpu is online at the time
         * this gets called, so we first wait for the BP to
         * finish SMP initialization:
         */
        while (!atomic_read(&tsc_start_flag)) mb();

        for (i = 0; i < NR_LOOPS; i++) {
                atomic_inc(&tsc_count_start);
                while (atomic_read(&tsc_count_start) != num_booting_cpus())
                        mb();

                rdtscll(tsc_values[smp_processor_id()]);
                if (i == NR_LOOPS-1)
                        write_tsc(0, 0);

                atomic_inc(&tsc_count_stop);
                while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
        }
}
extern void calibrate_delay(void);

static atomic_t init_deasserted;

void __init smp_callin(void)
{
        int cpuid, phys_id;
        unsigned long timeout;

        /*
         * If woken up by an INIT in an 82489DX configuration
         * we may get here before an INIT-deassert IPI reaches
         * our local APIC. We have to wait for the IPI or we'll
         * lock up on an APIC access.
         */
        if (!clustered_apic_mode)
                while (!atomic_read(&init_deasserted));

        /*
         * (This works even if the APIC is not enabled.)
         */
        phys_id = GET_APIC_ID(apic_read(APIC_ID));
        cpuid = smp_processor_id();
        if (test_bit(cpuid, &cpu_callin_map)) {
                printk("huh, phys CPU#%d, CPU#%d already present??\n",
                        phys_id, cpuid);
                BUG();
        }
        Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

        /*
         * STARTUP IPIs are fragile beasts as they might sometimes
         * trigger some glue motherboard logic. Complete APIC bus
         * silence for 1 second; this overestimates the time the
         * boot CPU is spending to send the up to 2 STARTUP IPIs
         * by a factor of two. This should be enough.
         */

        /*
         * Waiting 2s total for startup (udelay is not yet working)
         */
        timeout = jiffies + 2*HZ;
        while (time_before(jiffies, timeout)) {
                /*
                 * Has the boot CPU finished its STARTUP sequence?
                 */
                if (test_bit(cpuid, &cpu_callout_map))
                        break;
                rep_nop();
        }

        if (!time_before(jiffies, timeout)) {
                printk("BUG: CPU%d started up but did not get a callout!\n",
                        cpuid);
                BUG();
        }

        /*
         * the boot CPU has finished the init stage and is spinning
         * on callin_map until we finish. We are free to set up this
         * CPU, first the APIC. (this is probably redundant on most
         * boards)
         */

        Dprintk("CALLIN, before setup_local_APIC().\n");
        /*
         * Because we use NMIs rather than the INIT-STARTUP sequence to
         * bootstrap the CPUs, the APIC may be in a weird state. Kick it.
         */
        if (clustered_apic_mode)
                clear_local_APIC();
        setup_local_APIC();

        local_irq_enable();

        /*
         * Get our bogomips.
         */
        calibrate_delay();
        Dprintk("Stack at about %p\n", &cpuid);

        /*
         * Save our processor parameters
         */
        smp_store_cpu_info(cpuid);

        disable_APIC_timer();
        local_irq_disable();
        /*
         * Allow the master to continue.
         */
        set_bit(cpuid, &cpu_callin_map);

        /*
         * Synchronize the TSC with the BP
         */
        if (cpu_has_tsc)
                synchronize_tsc_ap();
}
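/*
 * Boot handshake recap (illustration): for each AP, do_boot_cpu() on the
 * BP and smp_callin()/start_secondary() on the AP meet in three bitmaps:
 *
 *      BP: set_bit(cpu, cpu_callout_map)    -> AP leaves its 2s wait loop
 *      AP: set_bit(cpu, cpu_callin_map)     -> BP leaves its 5s wait loop
 *      BP: set_bit(cpu, smp_commenced_mask) -> AP proceeds and finally
 *          sets its bit in cpu_online_map
 */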
extern int cpu_idle(void);

/*
 * Activate a secondary processor.
 */
int __init start_secondary(void *unused)
{
        /*
         * Don't put anything before smp_callin(); SMP booting
         * is so fragile that we want to limit the things done
         * here to the most necessary things.
         */
        cpu_init();
        smp_callin();
        while (!test_bit(smp_processor_id(), &smp_commenced_mask))
                rep_nop();
        setup_secondary_APIC_clock();
        if (nmi_watchdog == NMI_IO_APIC) {
                disable_8259A_irq(0);
                enable_NMI_through_LVT0(NULL);
                enable_8259A_irq(0);
        }
        enable_APIC_timer();
        /*
         * low-memory mappings have been cleared, flush them from
         * the local TLBs too.
         */
        local_flush_tlb();
        set_bit(smp_processor_id(), &cpu_online_map);
        wmb();
        return cpu_idle();
}
/*
 * Everything has been set up for the secondary
 * CPUs - they just need to reload everything
 * from the task structure. This function must
 * not return.
 */
void __init initialize_secondary(void)
{
        /*
         * We don't actually need to load the full TSS,
         * basically just the stack pointer and the eip.
         */
        asm volatile(
                "movl %0,%%esp\n\t"
                "jmp *%1"
                :
                :"r" (current->thread.esp), "r" (current->thread.eip));
}
static struct task_struct * __init fork_by_hand(void)
{
        struct pt_regs regs;
        /*
         * don't care about the eip and regs settings since
         * we'll never reschedule the forked task.
         */
        return do_fork(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL);
}
/* which physical APIC ID maps to which logical CPU number */
volatile int physical_apicid_2_cpu[MAX_APICID];
/* which logical CPU number maps to which physical APIC ID */
volatile int cpu_2_physical_apicid[NR_CPUS];

/* which logical APIC ID maps to which logical CPU number */
volatile int logical_apicid_2_cpu[MAX_APICID];
/* which logical CPU number maps to which logical APIC ID */
volatile int cpu_2_logical_apicid[NR_CPUS];
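/*
 * Illustration (hypothetical helper, for exposition only): the four
 * arrays above exist to make both lookup directions O(1).
 * boot_apicid_to_cpu(), used later in smp_boot_cpus() and defined
 * elsewhere, presumably reduces to an indexed read like this:
 */
#if 0
static int example_boot_apicid_to_cpu(int apicid)
{
        /* entries initialized to -1 below mean "no such CPU" */
        return clustered_apic_mode ? logical_apicid_2_cpu[apicid]
                                   : physical_apicid_2_cpu[apicid];
}
#endif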
static inline void init_cpu_to_apicid(void)
/* Initialize all maps between cpu number and apicids */
{
        int apicid, cpu;

        for (apicid = 0; apicid < MAX_APICID; apicid++) {
                physical_apicid_2_cpu[apicid] = -1;
                logical_apicid_2_cpu[apicid] = -1;
        }
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                cpu_2_physical_apicid[cpu] = -1;
                cpu_2_logical_apicid[cpu] = -1;
        }
}
static inline void map_cpu_to_boot_apicid(int cpu, int apicid)
/*
 * set up a mapping between cpu and apicid. Uses logical apicids for multiquad,
 * else physical apic ids
 */
{
        if (clustered_apic_mode) {
                logical_apicid_2_cpu[apicid] = cpu;
                cpu_2_logical_apicid[cpu] = apicid;
        } else {
                physical_apicid_2_cpu[apicid] = cpu;
                cpu_2_physical_apicid[cpu] = apicid;
        }
}
static inline void unmap_cpu_to_boot_apicid(int cpu, int apicid)
/*
 * undo a mapping between cpu and apicid. Uses logical apicids for multiquad,
 * else physical apic ids
 */
{
        if (clustered_apic_mode) {
                logical_apicid_2_cpu[apicid] = -1;
                cpu_2_logical_apicid[cpu] = -1;
        } else {
                physical_apicid_2_cpu[apicid] = -1;
                cpu_2_physical_apicid[cpu] = -1;
        }
}
static inline void inquire_remote_apic(int apicid)
{
        int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
        char *names[] = { "ID", "VERSION", "SPIV" };
        int timeout, status;

        printk("Inquiring remote APIC #%d...\n", apicid);

        for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
                printk("... APIC #%d %s: ", apicid, names[i]);

                /*
                 * Wait for idle.
                 */
                apic_wait_icr_idle();

                apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
                apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

                timeout = 0;
                do {
                        udelay(100);
                        status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
                } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

                switch (status) {
                case APIC_ICR_RR_VALID:
                        status = apic_read(APIC_RRR);
                        printk("%08x\n", status);
                        break;
                default:
                        printk("failed\n");
                }
        }
}
static int __init wakeup_secondary_via_NMI(int logical_apicid)
/*
 * Poke the other CPU in the eye to wake it up. Remember that the normal
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
{
        unsigned long send_status = 0, accept_status = 0;
        int timeout, maxlvt;

        /* Target chip */
        apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

        /* Boot on the stack */
        /* Kick the second */
        apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

        Dprintk("Waiting for send to finish...\n");
        timeout = 0;
        do {
                Dprintk("+");
                udelay(100);
                send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
        } while (send_status && (timeout++ < 1000));

        /*
         * Give the other CPU some time to accept the IPI.
         */
        udelay(200);
        /*
         * Due to the Pentium erratum 3AP.
         */
        maxlvt = get_maxlvt();
        if (maxlvt > 3) {
                apic_read_around(APIC_SPIV);
                apic_write(APIC_ESR, 0);
        }
        accept_status = (apic_read(APIC_ESR) & 0xEF);
        Dprintk("NMI sent.\n");

        if (send_status)
                printk("APIC never delivered???\n");
        if (accept_status)
                printk("APIC delivery error (%lx).\n", accept_status);

        return (send_status | accept_status);
}
static int __init wakeup_secondary_via_INIT(int phys_apicid, unsigned long start_eip)
{
        unsigned long send_status = 0, accept_status = 0;
        int maxlvt, timeout, num_starts, j;

        Dprintk("Asserting INIT.\n");

        /*
         * Turn INIT on target chip
         */
        apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

        /*
         * Send IPI
         */
        apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
                                | APIC_DM_INIT);

        Dprintk("Waiting for send to finish...\n");
        timeout = 0;
        do {
                Dprintk("+");
                udelay(100);
                send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
        } while (send_status && (timeout++ < 1000));

        mdelay(10);

        Dprintk("Deasserting INIT.\n");

        /* Target chip */
        apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

        /* Send IPI */
        apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

        Dprintk("Waiting for send to finish...\n");
        timeout = 0;
        do {
                Dprintk("+");
                udelay(100);
                send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
        } while (send_status && (timeout++ < 1000));

        atomic_set(&init_deasserted, 1);

        /*
         * Should we send STARTUP IPIs ?
         *
         * Determine this based on the APIC version.
         * If we don't have an integrated APIC, don't send the STARTUP IPIs.
         */
        if (APIC_INTEGRATED(apic_version[phys_apicid]))
                num_starts = 2;
        else
                num_starts = 0;

        /*
         * Run STARTUP IPI loop.
         */
        Dprintk("#startup loops: %d.\n", num_starts);

        maxlvt = get_maxlvt();

        for (j = 1; j <= num_starts; j++) {
                Dprintk("Sending STARTUP #%d.\n",j);
                apic_read_around(APIC_SPIV);
                apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
                Dprintk("After apic_write.\n");

                /*
                 * STARTUP IPI
                 */

                /* Target chip */
                apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

                /* Boot on the stack */
                /* Kick the second */
                apic_write_around(APIC_ICR, APIC_DM_STARTUP
                                        | (start_eip >> 12));

                /*
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(300);

                Dprintk("Startup point 1.\n");

                Dprintk("Waiting for send to finish...\n");
                timeout = 0;
                do {
                        Dprintk("+");
                        udelay(100);
                        send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
                } while (send_status && (timeout++ < 1000));

                /*
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(200);
                /*
                 * Due to the Pentium erratum 3AP.
                 */
                if (maxlvt > 3) {
                        apic_read_around(APIC_SPIV);
                        apic_write(APIC_ESR, 0);
                }
                accept_status = (apic_read(APIC_ESR) & 0xEF);
                if (send_status || accept_status)
                        break;
        }
        Dprintk("After Startup.\n");

        if (send_status)
                printk("APIC never delivered???\n");
        if (accept_status)
                printk("APIC delivery error (%lx).\n", accept_status);

        return (send_status | accept_status);
}
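/*
 * Sequence recap (illustration; this follows the Intel MP spec's
 * "universal start-up algorithm"): assert INIT (level-triggered), wait,
 * deassert INIT, then -- for integrated APICs only -- up to two STARTUP
 * IPIs whose vector field is the trampoline's 4K page number
 * (start_eip >> 12). Non-integrated 82489DX setups get no STARTUP IPIs
 * and rely on the INIT-deassert wait in smp_callin() instead.
 */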
extern unsigned long cpu_initialized;

static void __init do_boot_cpu (int apicid)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 */
{
        struct task_struct *idle;
        unsigned long boot_error = 0;
        int timeout, cpu;
        unsigned long start_eip;
        unsigned short nmi_high, nmi_low;

        cpu = ++cpucount;
        /*
         * We can't use kernel_thread since we must avoid
         * rescheduling the child.
         */
        idle = fork_by_hand();
        if (IS_ERR(idle))
                panic("failed fork for CPU %d", cpu);

        /*
         * We remove it from the pidhash and the runqueue
         * once we got the process:
         */
        init_idle(idle, cpu);

        map_cpu_to_boot_apicid(cpu, apicid);

        idle->thread.eip = (unsigned long) start_secondary;

        unhash_process(idle);

        /* start_eip had better be page-aligned! */
        start_eip = setup_trampoline();

        /* So we see what's up */
        printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
        stack_start.esp = (void *) (1024 + PAGE_SIZE + (char *)idle->thread_info);

        /*
         * This grunge runs the startup process for
         * the targeted processor.
         */

        atomic_set(&init_deasserted, 0);

        Dprintk("Setting warm reset code and vector.\n");

        if (clustered_apic_mode) {
                /* stash the current NMI vector, so we can put things back */
                nmi_high = *((volatile unsigned short *) TRAMPOLINE_HIGH);
                nmi_low = *((volatile unsigned short *) TRAMPOLINE_LOW);
        }

        CMOS_WRITE(0xa, 0xf);

        *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
        *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
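        /*
         * What the three writes above arrange (background, abridged):
         * shutdown status byte 0xA in CMOS register 0xF makes the BIOS
         * treat the next CPU reset as a warm boot and jump via the
         * 40:67 real-mode pointer, which the TRAMPOLINE_HIGH/LOW words
         * form as segment:offset of the trampoline page. For example,
         * start_eip 0x9A000 is stored as 9A00:0000.
         */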
        /*
         * Be paranoid about clearing APIC errors.
         */
        if (!clustered_apic_mode && APIC_INTEGRATED(apic_version[apicid])) {
                apic_read_around(APIC_SPIV);
                apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
        }

        /*
         * Status is now clean
         */
        boot_error = 0;

        /*
         * Starting actual IPI sequence...
         */
        if (clustered_apic_mode)
                boot_error = wakeup_secondary_via_NMI(apicid);
        else
                boot_error = wakeup_secondary_via_INIT(apicid, start_eip);

        if (!boot_error) {
                /*
                 * allow APs to start initializing.
                 */
                Dprintk("Before Callout %d.\n", cpu);
                set_bit(cpu, &cpu_callout_map);
                Dprintk("After Callout %d.\n", cpu);

                /*
                 * Wait 5s total for a response
                 */
                for (timeout = 0; timeout < 50000; timeout++) {
                        if (test_bit(cpu, &cpu_callin_map))
                                break;  /* It has booted */
                        udelay(100);
                }

                if (test_bit(cpu, &cpu_callin_map)) {
                        /* number CPUs logically, starting from 1 (BSP is 0) */
                        Dprintk("OK.\n");
                        printk("CPU%d: ", cpu);
                        print_cpu_info(&cpu_data[cpu]);
                        Dprintk("CPU has booted.\n");
                } else {
                        boot_error = 1;
                        if (*((volatile unsigned char *)phys_to_virt(8192))
                                        == 0xA5)
                                /* trampoline started but...? */
                                printk("Stuck ??\n");
                        else
                                /* trampoline code not run */
                                printk("Not responding.\n");
                        if (!clustered_apic_mode)
                                inquire_remote_apic(apicid);
                }
        }
        if (boot_error) {
                /* Try to put things back the way they were before ... */
                unmap_cpu_to_boot_apicid(cpu, apicid);
                clear_bit(cpu, &cpu_callout_map); /* was set here (do_boot_cpu()) */
                clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
                cpucount--;
        }

        /* mark "stuck" area as not stuck */
        *((volatile unsigned long *)phys_to_virt(8192)) = 0;

        if (clustered_apic_mode) {
                printk("Restoring NMI vector\n");
                *((volatile unsigned short *) TRAMPOLINE_HIGH) = nmi_high;
                *((volatile unsigned short *) TRAMPOLINE_LOW) = nmi_low;
        }
}
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

static void smp_tune_scheduling (void)
{
        unsigned long cachesize;       /* kB   */
        unsigned long bandwidth = 350; /* MB/s */
        /*
         * Rough estimation for SMP scheduling, this is the number of
         * cycles it takes for a fully memory-limited process to flush
         * the SMP-local cache.
         *
         * (For a P5 this pretty much means we will choose another idle
         * CPU almost always at wakeup time (this is due to the small
         * L1 cache), on PIIs it's around 50-100 usecs, depending on
         * the cache size)
         */

        if (!cpu_khz) {
                /*
                 * this basically disables processor-affinity
                 * scheduling on SMP without a TSC.
                 */
                cacheflush_time = 0;
                return;
        } else {
                cachesize = boot_cpu_data.x86_cache_size;
                if (cachesize == -1) {
                        cachesize = 16; /* Pentiums, 2x8kB cache */
                        bandwidth = 100;
                }

                cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
        }

        cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000;

        printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
                (long)cacheflush_time/(cpu_khz/1000),
                ((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
        printk("task migration cache decay timeout: %ld msecs.\n",
                (cache_decay_ticks + 1) * 1000 / HZ);
}
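/*
 * Worked example (illustrative numbers): cpu_khz = 500000 (500 MHz) and a
 * 512 kB cache give cacheflush_time = (500000>>10) * (512<<10) / 350 =
 * 488 * 524288 / 350 ~= 731000 cycles, i.e. roughly 1.46 ms of memory
 * traffic, and a reported task migration decay timeout on the order of
 * 10 ms.
 */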
/*
 * Cycle through the processors sending APIC IPIs to boot each.
 */

extern int prof_multiplier[NR_CPUS];
extern int prof_old_multiplier[NR_CPUS];
extern int prof_counter[NR_CPUS];

static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;

int cpu_sibling_map[NR_CPUS] __cacheline_aligned;
static void __init smp_boot_cpus(unsigned int max_cpus)
{
        int apicid, cpu, bit;

        /*
         * Initialize the logical to physical CPU number mapping
         * and the per-CPU profiling counter/multiplier
         */

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                prof_counter[cpu] = 1;
                prof_old_multiplier[cpu] = 1;
                prof_multiplier[cpu] = 1;
        }

        init_cpu_to_apicid();

        /*
         * Setup boot CPU information
         */
        smp_store_cpu_info(0); /* Final full version of the data */
        printk("CPU%d: ", 0);
        print_cpu_info(&cpu_data[0]);

        /*
         * We have the boot CPU online for sure.
         */
        set_bit(0, &cpu_online_map);
        set_bit(0, &cpu_callout_map);
        boot_cpu_logical_apicid = logical_smp_processor_id();
        map_cpu_to_boot_apicid(0, boot_cpu_apicid);

        current_thread_info()->cpu = 0;
        smp_tune_scheduling();

        /*
         * If we couldn't find an SMP configuration at boot time,
         * get out of here now!
         */
        if (!smp_found_config) {
                printk(KERN_NOTICE "SMP motherboard not detected.\n");
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = 1;
                if (APIC_init_uniprocessor())
                        printk(KERN_NOTICE "Local APIC not detected."
                                           " Using dummy APIC emulation.\n");
                return;
        }

        /*
         * Should not be necessary because the MP table should list the boot
         * CPU too, but we do it for the sake of robustness anyway.
         * Makes no sense to do this check in clustered apic mode, so skip it
         */
        if (!clustered_apic_mode &&
            !test_bit(boot_cpu_physical_apicid, &phys_cpu_present_map)) {
                printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
                        boot_cpu_physical_apicid);
                phys_cpu_present_map |= (1 << hard_smp_processor_id());
        }

        /*
         * If we couldn't find a local APIC, then get out of here now!
         */
        if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
                printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
                        boot_cpu_physical_apicid);
                printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = 1;
                return;
        }

        verify_local_APIC();

        /*
         * If SMP should be disabled, then really disable it!
         */
        if (!max_cpus) {
                smp_found_config = 0;
                printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = 1;
                return;
        }

        connect_bsp_APIC();
        setup_local_APIC();

        if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid)
                BUG();

        if (clustered_apic_mode && (numnodes > 1)) {
                printk("Remapping cross-quad port I/O for %d quads\n",
                        numnodes);
                xquad_portio = ioremap(XQUAD_PORTIO_BASE,
                        numnodes * XQUAD_PORTIO_QUAD);
                printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
                        (u_long) xquad_portio,
                        (u_long) numnodes * XQUAD_PORTIO_QUAD);
        }

        /*
         * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
         *
         * In clustered apic mode, phys_cpu_present_map is constructed thus:
         * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
         * clustered apic ID.
         */
        Dprintk("CPU present map: %lx\n", phys_cpu_present_map);

        for (bit = 0; bit < NR_CPUS; bit++) {
                apicid = cpu_present_to_apicid(bit);
                /*
                 * Don't even attempt to start the boot CPU!
                 */
                if (apicid == boot_cpu_apicid)
                        continue;

                if (!(phys_cpu_present_map & (1 << bit)))
                        continue;
                if (max_cpus <= cpucount+1)
                        continue;

                do_boot_cpu(apicid);

                /*
                 * Make sure we unmap all failed CPUs
                 */
                if ((boot_apicid_to_cpu(apicid) == -1) &&
                    (phys_cpu_present_map & (1 << bit)))
                        printk("CPU #%d not responding - cannot use it.\n",
                                apicid);
        }

        /*
         * Cleanup possible dangling ends...
         */
        smpboot_setup_warm_reset_vector();

        /*
         * Allow the user to impress friends.
         */

        Dprintk("Before bogomips.\n");
        if (!cpucount) {
                printk(KERN_ERR "Error: only one processor found.\n");
        } else {
                unsigned long bogosum = 0;
                for (cpu = 0; cpu < NR_CPUS; cpu++)
                        if (cpu_callout_map & (1<<cpu))
                                bogosum += cpu_data[cpu].loops_per_jiffy;
                printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                        cpucount+1,
                        bogosum/(500000/HZ),
                        (bogosum/(5000/HZ))%100);
                Dprintk("Before bogocount - setting activated=1.\n");
        }

        if (smp_b_stepping)
                printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");
        Dprintk("Boot done.\n");

        /*
         * If Hyper-Threading is available, construct cpu_sibling_map[], so
         * that we can tell the sibling CPU efficiently.
         */
        if (cpu_has_ht && smp_num_siblings > 1) {
                for (cpu = 0; cpu < NR_CPUS; cpu++)
                        cpu_sibling_map[cpu] = NO_PROC_ID;

                for (cpu = 0; cpu < NR_CPUS; cpu++) {
                        int i;

                        if (!test_bit(cpu, &cpu_callout_map))
                                continue;

                        for (i = 0; i < NR_CPUS; i++) {
                                if (i == cpu || !test_bit(i, &cpu_callout_map))
                                        continue;
                                if (phys_proc_id[cpu] == phys_proc_id[i]) {
                                        cpu_sibling_map[cpu] = i;
                                        printk("cpu_sibling_map[%d] = %d\n",
                                                cpu, cpu_sibling_map[cpu]);
                                        break;
                                }
                        }
                        if (cpu_sibling_map[cpu] == NO_PROC_ID) {
                                smp_num_siblings = 1;
                                printk(KERN_WARNING "WARNING: No sibling found for CPU %d.\n", cpu);
                        }
                }
        }

        smpboot_setup_io_apic();

        setup_boot_APIC_clock();

        /*
         * Synchronize the TSC with the AP
         */
        if (cpu_has_tsc && cpucount)
                synchronize_tsc_bp();
}
/* These are wrappers to interface to the new boot process. Someone
   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        smp_boot_cpus(max_cpus);
}

int __devinit __cpu_up(unsigned int cpu)
{
        /* This only works at boot for x86. See "rewrite" above. */
        if (test_bit(cpu, &smp_commenced_mask))
                return -ENOSYS;

        /* In case one didn't come up */
        if (!test_bit(cpu, &cpu_callin_map))
                return -EIO;

        /* Unleash the CPU! */
        set_bit(cpu, &smp_commenced_mask);
        while (!test_bit(cpu, &cpu_online_map))
                mb();
        return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        zap_low_mappings();
}
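/*
 * Call-order sketch (illustration; the generic caller lives outside this
 * file and its details are assumed here): the boot CPU runs roughly
 *
 *      smp_prepare_cpus(max_cpus);        boots everything via smp_boot_cpus()
 *      for each present AP: cpu_up(cpu);  -> __cpu_up() just "commences" it
 *      smp_cpus_done(max_cpus);
 *
 * which is why __cpu_up() only works at boot on x86: the real work has
 * already happened inside smp_prepare_cpus().
 */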
void __init smp_intr_init(void)
{
        /*
         * IRQ0 must be given a fixed assignment and initialized,
         * because it's used before the IO-APIC is set up.
         */
        set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);

        /*
         * The reschedule interrupt is a CPU-to-CPU reschedule-helper
         * IPI, driven by wakeup.
         */
        set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

        /* IPI for invalidation */
        set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

        /* IPI for generic function call */
        set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);

        /* thermal monitor LVT interrupt */
#ifdef CONFIG_X86_MCE_P4THERMAL
        set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
}