#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>

#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/numa.h>

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

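/*
 * cpu_callin_map and cpu_callout_map implement the boot handshake:
 * the BSP sets a bit in cpu_callout_map to let an AP proceed, and the
 * AP answers by setting its own bit in cpu_callin_map.
 * cpu_possible_map is fixed early in boot (see prefill_possible_map()).
 */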
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* ready for x86_64, no harm for x86, since it will be overwritten after alloc */
unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* Set if we find a B stepping CPU */
int __cpuinitdata smp_b_stepping;

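/*
 * Apply CPU-model specific SMP quirks: note buggy B-step Pentiums and
 * taint the kernel when an AMD K7 that is not certified for MP
 * operation is booted as part of an SMP system.
 */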
static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
{
	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3)
		/*
		 * Remember we have B step Pentia with bugs
		 */
		smp_b_stepping = 1;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

		if (num_possible_cpus() == 1)
			goto valid_k7;

		/* Athlon 660/661 is valid. */
		if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
		    (c->x86_mask == 1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model == 7) && (c->x86_mask == 0))
			goto valid_k7;

		/*
		 * Athlon 662, Duron 671, and Athlon >model 7 have capability
		 * bit. It's worth noting that the A5 stepping (662) of some
		 * Athlon XP's have the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
		 * more.
		 */
		if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
		    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
		     (c->x86_model > 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, not a certified SMP capable AMD system. */
		add_taint(TAINT_UNSAFE_SMP);
	}

valid_k7:
	;
}

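/*
 * Warn the user when the assembled set of CPUs is known to be shaky:
 * B-stepping Pentiums, or AMD K7 parts that are not MP certified.
 */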
void smp_checks(void)
{
	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable "
		       "with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (cpus_weight(cpu_present_map))
			printk(KERN_INFO "WARNING: This combination of AMD "
			       "processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */
void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	if (id != 0)
		identify_secondary_cpu(c);
	smp_apply_quirks(c);
}

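/*
 * Record the HT-sibling, core-sibling and shared-last-level-cache
 * relationships between @cpu and every CPU already registered in
 * cpu_sibling_setup_map, and keep booted_cores in sync.
 */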
void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bringup a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}

/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For perf, we return last level cache shared map.
	 * And for power savings, we return cpu_core_map
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return per_cpu(cpu_core_map, cpu);
	else
		return c->llc_shared_map;
}

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
unsigned long __cpuinit setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data,
	       trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}

/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	if (__pa(trampoline_base) >= 0x9F000)
		BUG();
}

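/*
 * Sum the per-CPU BogoMIPS of every CPU that checked in and print the
 * traditional "Total of N processors activated" line.
 */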
void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for_each_possible_cpu(cpu)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		cpus_weight(cpu_present_map),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	Dprintk("Before bogocount - setting activated=1.\n");
}

#ifdef CONFIG_HOTPLUG_CPU
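/*
 * Undo set_cpu_sibling_map() for a CPU that is going offline: drop it
 * from every sibling/core map and fix up booted_cores.
 */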
void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

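/*
 * Number of extra "possible" CPUs to reserve for hotplug; -1 means
 * derive it from the CPUs the BIOS reported as disabled.
 */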
int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);

/*
 * cpu_possible_map should be static: it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When cpu_hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can override it with additional_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}

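/*
 * Clear every record of @cpu in the global CPU maps so nothing tries
 * to talk to it once it is offline.
 */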
static void __ref remove_cpu_from_maps(int cpu)
{
	cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_X86_64
	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	/* was set by cpu_init() */
	clear_bit(cpu, (unsigned long *)&cpu_initialized);
	clear_node_cpumask(cpu);
#endif
}

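/*
 * Take the calling CPU out of service: stop its local-APIC NMI
 * watchdog, tear down its sibling info, remove it from the CPU maps
 * and redirect its interrupts to the CPUs that remain online.
 */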
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();

	/*
	 * Allow any queued timer interrupts to get serviced
	 * This is only a temporary solution until we cleanup
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	remove_cpu_from_maps(cpu);
	fixup_irqs(cpu_online_map);
	return 0;
}

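/*
 * Wait for the offlined CPU to report CPU_DEAD from play_dead(); poll
 * for roughly a second before giving up.
 */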
void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk(KERN_INFO "CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * If the BIOS enumerates physical processors before logical,
 * maxcpus=N at enumeration-time can be used to disable HT.
 */
static int __init parse_maxcpus(char *arg)
{
	extern unsigned int maxcpus;

	maxcpus = simple_strtoul(arg, NULL, 0);
	return 0;
}
early_param("maxcpus", parse_maxcpus);