x86: remove smp_apply_quirks()/smp_checks()
[linux-2.6/kvm.git] arch/x86/kernel/cpu/amd.c
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_64
# include <asm/numa_64.h>
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"
#ifdef CONFIG_X86_32
/*
 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 *	misexecution of code under Linux. Owners of such processors should
 *	contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.multimania.com/poulot/k6bug.html
 *		http://www.amd.com/K6/k6docs/revgd.html
 *
 *	The following test is erm.. interesting. AMD neglected to up
 *	the chip setting when fixing the bug but they also tweaked some
 *	performance at the same time..
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");
static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
{
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
}
static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20-PAGE_SHIFT);
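	/*
	 * Worked example of the conversion above: num_physpages is in pages,
	 * so shifting right by (20 - PAGE_SHIFT) yields megabytes; with 4 KB
	 * pages (PAGE_SHIFT == 12) that is a shift by 8, i.e. 256 pages per MB.
	 */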

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			printk("system stability may be impaired when more than 32 MB are used.\n");
		else
			printk("probably OK (after B9730xxxx).\n");
		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}
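
	/*
	 * Sketch of the arithmetic in the block above: (mbytes >> 2) appears
	 * to be the write-allocatable limit in 4 MB units, placed in the top
	 * ten bits of WHCR; e.g. mbytes == 4092 gives 1023 units (0x3ff),
	 * which is why mbytes is clamped to 4092. The (1 << 16) term is kept
	 * exactly as the original code sets it.
	 */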

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
}
static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	/* calling is from identify_secondary_cpu() ? */
	if (c->cpu_index == boot_cpu_id)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		goto valid_k7;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		goto valid_k7;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XP's have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has_mp)
			goto valid_k7;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	if (!test_taint(TAINT_UNSAFE_SMP))
		add_taint(TAINT_UNSAFE_SMP);

valid_k7:
	;
#endif
}
static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
{
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPU's.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			rdmsr(MSR_K7_HWCR, l, h);
			l &= ~0x00008000;
			wrmsr(MSR_K7_HWCR, l, h);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
				((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	amd_k7_smp_check(c);
}
#endif

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
static int __cpuinit nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits;

	bits = c->x86_coreid_bits;

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
#endif
}
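
/*
 * Worked example for amd_detect_cmp() above (illustrative numbers only):
 * with x86_coreid_bits == 2 and an initial APIC ID of 0x7, the core id is
 * 0x7 & 0x3 == 3 and the socket (phys_proc_id) is 0x7 >> 2 == 1.
 */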

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = hard_smp_processor_id();

	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		     In that case try picking one from a nearby CPU
		   - The APIC IDs differ from the HyperTransport node IDs
		     which the K8 northbridge parsing fills in.
		     Assume they are all increased by a constant offset,
		     but in the same order as the HT nodeids.
		     If that doesn't result in a usable node fall back to the
		     path for the previous case. */

		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}
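
/*
 * Example decode for early_init_amd_mc() above (illustrative values only):
 * a quad-core part reporting ECX == 0x2003 in CPUID 0x80000008 gives
 * x86_max_cores = (0x03 + 1) = 4 and x86_coreid_bits = (0x2003 >> 12) & 0xf = 2.
 */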

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned long long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 0xf) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

#ifdef CONFIG_X86_64
	/* On C+ stepping K8 rep microcode works well for copy/memset */
	if (c->x86 == 0xf) {
		u32 level;

		level = cpuid_eax(1);
		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	}
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else

	/*
	 * FIXME: We should handle the K5 here. Set up the write
	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 * no bus pipeline)
	 */

	switch (c->x86) {
	case 4:
		init_amd_k5(c);
		break;
	case 5:
		init_amd_k6(c);
		break;
	case 6: /* An Athlon/Duron */
		init_amd_k7(c);
		break;
	}

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);
#endif

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	if (!c->x86_model_id[0]) {
		switch (c->x86) {
		case 0xf:
			/* Should distinguish Models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}

	display_cacheinfo(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	if (c->extended_cpuid_level >= 0x80000006) {
		if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}

	if (c->x86 >= 0xf && c->x86 <= 0x11)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

#ifdef CONFIG_X86_64
	if (c->x86 == 0x10) {
		/* do this for boot cpu */
		if (c == &boot_cpu_data)
			check_enable_amd_mmconf_dmi();

		fam10h_check_enable_mmcfg();
	}

	if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
			if ((tseg>>PMD_SHIFT) <
				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
				((tseg>>PMD_SHIFT) <
				 (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
				 (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif
}
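
/*
 * Illustrative numbers for the TSEG check above, assuming 4 KB pages and 2 MB
 * PMD entries (PAGE_SHIFT == 12, PMD_SHIFT == 21): a hypothetical TSEG base of
 * 0x7f000000 gives tseg >> PMD_SHIFT == 0x3f8, which is compared against the
 * mapped memory limits expressed in the same 2 MB units, i.e.
 * max_low_pfn_mapped >> (PMD_SHIFT - PAGE_SHIFT) == max_low_pfn_mapped >> 9.
 */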

#ifdef CONFIG_X86_32
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
			size = 64;
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))	/* Tbird rev A1/A2 */
			size = 256;
	}
	return size;
}
#endif

static struct cpu_dev amd_cpu_dev __cpuinitdata = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.c_size_cache	= amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);