/* arch/x86/kernel/cpu/intel.c */
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/ds.h>
#include <asm/bugs.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#include <asm/numa_64.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
        /* Unmask CPUID levels if masked: */
        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
                u64 misc_enable;

                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

                if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
                        misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
                        wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                        c->cpuid_level = cpuid_eax(0);
                }
        }
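
        /*
         * (MISC_ENABLE.LIMIT_CPUID is set by some BIOSes for old-OS
         * compatibility and caps the maximum standard leaf reported in
         * CPUID.0:EAX; once it is cleared, cpuid_eax(0) above re-reads
         * the true cpuid_level.)
         */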

        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
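
        /*
         * (i.e. family 0xf from model 3 and family 6 from model 0xe -
         * roughly Prescott- and Yonah-class parts onwards - whose TSC
         * ticks at a constant rate across frequency transitions.)
         */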

#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
        /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
        if (c->x86 == 15 && c->x86_cache_alignment == 64)
                c->x86_cache_alignment = 128;
#endif

        /* CPUID workaround for 0F33/0F34 CPU */
        if (c->x86 == 0xF && c->x86_model == 0x3
            && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
                c->x86_phys_bits = 36;
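
        /*
         * (These steppings are assumed to over-report their physical
         * address width via CPUID 8000_0008; pinning x86_phys_bits to
         * the 36 bits the parts actually implement works around that.)
         */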

        /*
         * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
         * with P/T states and does not stop in deep C-states.
         *
         * It is also reliable across cores and sockets. (but not across
         * cabinets - we turn it off in that case explicitly.)
         */
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
                set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
                sched_clock_stable = 1;
        }

        /*
         * There is a known erratum on Pentium III and Core Solo
         * and Core Duo CPUs.
         * " Page with PAT set to WC while associated MTRR is UC
         * may consolidate to UC "
         * Because of this erratum, it is better to stick with
         * setting WC in MTRR rather than using PAT on these CPUs.
         *
         * Enable PAT WC only on P4, Core 2 or later CPUs.
         */
        if (c->x86 == 6 && c->x86_model < 15)
                clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
        /*
         * P4s have a "fast strings" feature which causes single-
         * stepping REP instructions to only generate a #DB on
         * cache-line boundaries.
         *
         * Ingo Molnar reported a Pentium D (model 6) and a Xeon
         * (model 2) with the same problem.
         */
        if (c->x86 == 15) {
                u64 misc_enable;

                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

                if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
                        printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

                        misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
                        wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                }
        }
#endif
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int __cpuinit ppro_with_ram_bug(void)
{
        /* Uses data from early_cpu_detect now */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
            boot_cpu_data.x86_mask < 8) {
                printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
                return 1;
        }
        return 0;
}
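
/*
 * (Family 6, model 1 is the Pentium Pro; steppings below 8 are the
 * early parts covered by erratum #50. Callers are assumed to take the
 * "evasive action" - e.g. avoiding the affected memory configuration -
 * when this returns 1.)
 */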

#ifdef CONFIG_X86_F00F_BUG
static void __cpuinit trap_init_f00f_bug(void)
{
        __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

        /*
         * Update the IDT descriptor and reload the IDT so that
         * it uses the read-only mapped virtual address.
         */
        idt_descr.address = fix_to_virt(FIX_F00F_IDT);
        load_idt(&idt_descr);
}
#endif
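
/*
 * (Background, summarized from outside this file: the "F0 0F C7 C8"
 * byte sequence - lock cmpxchg8b with a register operand - hangs
 * affected Pentiums while the resulting #UD fault is being delivered.
 * With the IDT aliased through a read-only mapping, that lockup turns
 * into a page fault the trap code can recognize and recover from.)
 */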

static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        /* Is this call from identify_secondary_cpu()? */
        if (c->cpu_index == boot_cpu_id)
                return;

        /*
         * Mask B, Pentium, but not Pentium MMX
         */
        if (c->x86 == 5 &&
            c->x86_mask >= 1 && c->x86_mask <= 4 &&
            c->x86_model <= 3) {
                /*
                 * Remember we have B step Pentia with bugs
                 */
                WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
                             "with B stepping processors.\n");
        }
#endif
}

static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
        unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG
        /*
         * All current models of Pentium and Pentium with MMX technology CPUs
         * have the F0 0F bug, which lets nonprivileged users lock up the
         * system.
         * Note that the workaround should only be initialized once...
         */
        c->f00f_bug = 0;
        if (!paravirt_enabled() && c->x86 == 5) {
                static int f00f_workaround_enabled;

                c->f00f_bug = 1;
                if (!f00f_workaround_enabled) {
                        trap_init_f00f_bug();
                        printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
                        f00f_workaround_enabled = 1;
                }
        }
#endif

        /*
         * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
         * model 3 mask 3
         */
        if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
                clear_cpu_cap(c, X86_FEATURE_SEP);
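
        /*
         * (0x633 packs family 6, model 3, stepping 3 with the same
         * (family << 8 | model << 4 | stepping) encoding used on the
         * left-hand side of the comparison.)
         */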

        /*
         * P4 Xeon errata 037 workaround.
         * Hardware prefetcher may cause stale data to be loaded into the cache.
         */
        if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
                rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
                if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
                        printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
                        printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
                        lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
                        wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
                }
        }

        /*
         * See if we have a good local APIC by checking for buggy Pentia,
         * i.e. all B steppings and the C2 stepping of P54C when using their
         * integrated APIC (see 11AP erratum in "Pentium Processor
         * Specification Update").
         */
        if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
            (c->x86_mask < 0x6 || c->x86_mask == 0xb))
                set_cpu_cap(c, X86_FEATURE_11AP);
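
        /*
         * (0x520 is family 5, model 2 in the same packed encoding, i.e.
         * the P54C core; mask < 0x6 catches the B steppings and 0xb is
         * presumably the C2 stepping named above.)
         */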

#ifdef CONFIG_X86_INTEL_USERCOPY
        /*
         * Set up the preferred alignment for movsl bulk memory moves
         */
        switch (c->x86) {
        case 4:         /* 486: untested */
                break;
        case 5:         /* Old Pentia: untested */
                break;
        case 6:         /* PII/PIII only like movsl with 8-byte alignment */
                movsl_mask.mask = 7;
                break;
        case 15:        /* P4 is OK down to 8-byte alignment */
                movsl_mask.mask = 7;
                break;
        }
#endif

#ifdef CONFIG_X86_NUMAQ
        numaq_tsc_disable();
#endif

        intel_smp_check(c);
}
#else
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
        unsigned node;
        int cpu = smp_processor_id();
        int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;

        /*
         * Don't do the funky fallback heuristics the AMD version
         * employs for now.
         */
        node = apicid_to_node[apicid];
        if (node == NUMA_NO_NODE || !node_online(node))
                node = first_node(node_online_map);
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, ebx, ecx, edx;

        if (c->cpuid_level < 4)
                return 1;

        /* Intel has a non-standard dependency on %ecx for this CPUID level. */
        cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
        if (eax & 0x1f)
                return (eax >> 26) + 1;
        else
                return 1;
}
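
/*
 * (CPUID leaf 4, subleaf 0: EAX[4:0] is the cache type field - zero
 * means the leaf is invalid - while EAX[31:26] encodes the maximum
 * core ID in the physical package, so "+ 1" yields the core count.)
 */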

static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
        /* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW    0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI          0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS      0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC    0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT          0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID         0x00000020

        u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

        clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
        clear_cpu_cap(c, X86_FEATURE_VNMI);
        clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
        clear_cpu_cap(c, X86_FEATURE_EPT);
        clear_cpu_cap(c, X86_FEATURE_VPID);
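
        /*
         * (For these VMX capability MSRs the high dword reports the
         * "allowed-1" settings: a control can be enabled only if its
         * bit is set there. OR-ing in the low dword as done below is
         * assumed to be a harmless shorthand, since default-1 controls
         * are also allowed-1.)
         */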
        rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
        msr_ctl = vmx_msr_high | vmx_msr_low;
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
                set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
                set_cpu_cap(c, X86_FEATURE_VNMI);
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
                rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
                      vmx_msr_low, vmx_msr_high);
                msr_ctl2 = vmx_msr_high | vmx_msr_low;
                if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
                    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
                        set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
                        set_cpu_cap(c, X86_FEATURE_EPT);
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
                        set_cpu_cap(c, X86_FEATURE_VPID);
        }
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        unsigned int l2 = 0;

        early_init_intel(c);

        intel_workarounds(c);

        /*
         * Detect the extended topology information if available. This
         * will reinitialise the initial_apicid which will be used
         * in init_intel_cacheinfo()
         */
        detect_extended_topology(c);

        l2 = init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
        }
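
        /*
         * (CPUID leaf 0xa: EAX[7:0] is the architectural PMU version,
         * EAX[15:8] the number of general-purpose counters per logical
         * CPU; the check above wants a nonzero version and more than
         * one counter.)
         */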

        if (c->cpuid_level > 6) {
                unsigned ecx = cpuid_ecx(6);
                if (ecx & 0x01)
                        set_cpu_cap(c, X86_FEATURE_APERFMPERF);
        }

        if (cpu_has_xmm2)
                set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
        if (cpu_has_ds) {
                unsigned int l1;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1<<11)))
                        set_cpu_cap(c, X86_FEATURE_BTS);
                if (!(l1 & (1<<12)))
                        set_cpu_cap(c, X86_FEATURE_PEBS);
                ds_init_intel(c);
        }
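
        /*
         * (MISC_ENABLE bit 11 is "BTS unavailable" and bit 12 is "PEBS
         * unavailable", hence the inverted tests above.)
         */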

        if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
                set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64
        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
        /*
         * Names for the Pentium II/Celeron processors
         * detectable only by also checking the cache size.
         * Dixon is NOT a Celeron.
         */
        if (c->x86 == 6) {
                char *p = NULL;

                switch (c->x86_model) {
                case 5:
                        if (c->x86_mask == 0) {
                                if (l2 == 0)
                                        p = "Celeron (Covington)";
                                else if (l2 == 256)
                                        p = "Mobile Pentium II (Dixon)";
                        }
                        break;

                case 6:
                        if (l2 == 128)
                                p = "Celeron (Mendocino)";
                        else if (c->x86_mask == 0 || c->x86_mask == 5)
                                p = "Celeron-A";
                        break;

                case 8:
                        if (l2 == 128)
                                p = "Celeron (Coppermine)";
                        break;
                }

                if (p)
                        strcpy(c->x86_model_id, p);
        }

        if (c->x86 == 15)
                set_cpu_cap(c, X86_FEATURE_P4);
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_P3);
#endif

        if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
                /*
                 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
                 * detection.
                 */
                c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
                detect_ht(c);
#endif
        }

        /* Work around errata */
        srat_detect_node(c);

        if (cpu_has(c, X86_FEATURE_VMX))
                detect_vmx_virtcap(c);
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /*
         * Intel PIII Tualatin. This comes in two flavours.
         * One has 256kb of cache, the other 512. We have no way
         * to determine which, so we use a boottime override
         * for the 512kb model, and assume 256 otherwise.
         */
        if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
                size = 256;
        return size;
}
#endif

static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
        .c_vendor       = "Intel",
        .c_ident        = { "GenuineIntel" },
#ifdef CONFIG_X86_32
        .c_models = {
                { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
                  {
                          [0] = "486 DX-25/33",
                          [1] = "486 DX-50",
                          [2] = "486 SX",
                          [3] = "486 DX/2",
                          [4] = "486 SL",
                          [5] = "486 SX/2",
                          [7] = "486 DX/2-WB",
                          [8] = "486 DX/4",
                          [9] = "486 DX/4-WB"
                  }
                },
                { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
                  {
                          [0] = "Pentium 60/66 A-step",
                          [1] = "Pentium 60/66",
                          [2] = "Pentium 75 - 200",
                          [3] = "OverDrive PODP5V83",
                          [4] = "Pentium MMX",
                          [7] = "Mobile Pentium 75 - 200",
                          [8] = "Mobile Pentium MMX"
                  }
                },
                { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
                  {
                          [0] = "Pentium Pro A-step",
                          [1] = "Pentium Pro",
                          [3] = "Pentium II (Klamath)",
                          [4] = "Pentium II (Deschutes)",
                          [5] = "Pentium II (Deschutes)",
                          [6] = "Mobile Pentium II",
                          [7] = "Pentium III (Katmai)",
                          [8] = "Pentium III (Coppermine)",
                          [10] = "Pentium III (Cascades)",
                          [11] = "Pentium III (Tualatin)",
                  }
                },
                { .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
                  {
                          [0] = "Pentium 4 (Unknown)",
                          [1] = "Pentium 4 (Willamette)",
                          [2] = "Pentium 4 (Northwood)",
                          [4] = "Pentium 4 (Foster)",
                          [5] = "Pentium 4 (Foster)",
                  }
                },
        },
        .c_size_cache   = intel_size_cache,
#endif
        .c_early_init   = early_init_intel,
        .c_init         = init_intel,
        .c_x86_vendor   = X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);
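
/*
 * (cpu_dev_register() is assumed to place a pointer to this descriptor
 * in a dedicated init section that the generic code in cpu/common.c
 * scans during CPU identification, matching .c_ident against the
 * CPUID vendor string.)
 */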