x86: use ELF section to list CPU vendor specific code
arch/x86/kernel/cpu/intel.c (linux-2.6/mini2440.git)
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/module.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/ds.h>
#include <asm/bugs.h>

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif
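/*
 * For reference: movsl_mask.mask is consulted by the 32-bit user-copy code
 * to decide when rep;movsl is worthwhile.  A minimal sketch of such a check
 * (the real helper is __movsl_is_ok() in arch/x86/lib/usercopy_32.c and may
 * differ in detail):
 *
 *	static inline int movsl_is_ok(unsigned long a1, unsigned long a2,
 *				      unsigned long n)
 *	{
 *		// only bulk copies with suitably aligned buffers use movsl
 *		if (n >= 64 && ((a1 | a2) & movsl_mask.mask))
 *			return 0;
 *		return 1;
 *	}
 */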
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */
int __cpuinit ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}
/*
 * P4 Xeon errata 037 workaround.
 * Hardware prefetcher may cause stale data to be loaded into the cache.
 */
static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & (1<<9)) == 0) {
			printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= (1<<9);	/* Disable hw prefetching */
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}
}
/*
 * find out the number of processor cores on the die
 */
static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}
#ifdef CONFIG_X86_F00F_BUG
static void __cpuinit trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;
	char *p = NULL;

	early_init_intel(c);

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the system.
	 * Note that the workaround only should be initialized once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled = 0;

		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	l2 = init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
	}

	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_bit(X86_FEATURE_SEP, c->x86_capability);

	/* Names for the Pentium II/Celeron processors
	   detectable only by also checking the cache size.
	   Dixon is NOT a Celeron. */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 5:
			if (c->x86_mask == 0) {
				if (l2 == 0)
					p = "Celeron (Covington)";
				else if (l2 == 256)
					p = "Mobile Pentium II (Dixon)";
			}
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}
	}

	if (p)
		strcpy(c->x86_model_id, p);

	c->x86_max_cores = num_cpu_cores(c);

	detect_ht(c);

	/* Work around errata */
	Intel_errata_workarounds(c);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	if (cpu_has_xmm2)
		set_bit(X86_FEATURE_LFENCE_RDTSC, c->x86_capability);
	if (c->x86 == 15) {
		set_bit(X86_FEATURE_P4, c->x86_capability);
	}
	if (c->x86 == 6)
		set_bit(X86_FEATURE_P3, c->x86_capability);
	if (cpu_has_ds) {
		unsigned int l1;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_bit(X86_FEATURE_BTS, c->x86_capability);
		if (!(l1 & (1<<12)))
			set_bit(X86_FEATURE_PEBS, c->x86_capability);
	}

	if (cpu_has_bts)
		ds_init_intel(c);
}
static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;
	return size;
}
static struct cpu_dev intel_cpu_dev __cpuinitdata = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
	.c_size_cache	= intel_size_cache,
};

cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
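/*
 * Registration note: cpu_vendor_dev_register() (declared in cpu.h) is the
 * mechanism the "use ELF section to list CPU vendor specific code" change
 * introduces.  A minimal sketch of how such a macro can work -- the section
 * name, struct name and field layout here are assumptions, not necessarily
 * the exact ones used in this tree:
 *
 *	#define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev)		\
 *		static struct cpu_vendor_dev				\
 *		__cpu_vendor_dev_##cpu_vendor_id __used			\
 *		__attribute__((__section__(".x86cpuvendor.init"))) =	\
 *		{ cpu_vendor_id, cpu_dev }
 *
 * The common CPU setup code can then walk the objects placed between the
 * linker symbols bounding that section instead of keeping a hand-maintained
 * list of per-vendor init calls.
 */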
#ifndef CONFIG_X86_CMPXCHG
unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
{
	u8 prev;
	unsigned long flags;

	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u8 *)ptr;
	if (prev == old)
		*(u8 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u8);

unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
{
	u16 prev;
	unsigned long flags;

	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u16 *)ptr;
	if (prev == old)
		*(u16 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u16);

unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
{
	u32 prev;
	unsigned long flags;

	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u32 *)ptr;
	if (prev == old)
		*(u32 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u32);
#endif
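/*
 * For reference: on a !CONFIG_X86_CMPXCHG (i.e. 386-capable) build, the
 * generic cmpxchg() macro falls back to the helpers above when it detects
 * at runtime that the CPU lacks the cmpxchg instruction.  A rough sketch of
 * that dispatch (modelled on asm/cmpxchg_32.h; exact macro layout may
 * differ):
 *
 *	static inline unsigned long cmpxchg_386(volatile void *ptr,
 *			unsigned long old, unsigned long new, int size)
 *	{
 *		switch (size) {
 *		case 1: return cmpxchg_386_u8(ptr, old, new);
 *		case 2: return cmpxchg_386_u16(ptr, old, new);
 *		case 4: return cmpxchg_386_u32(ptr, old, new);
 *		}
 *		return old;
 *	}
 *
 * with cmpxchg(ptr, o, n) testing boot_cpu_data.x86 > 3 to choose between
 * the real instruction and this software emulation.
 */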
#ifndef CONFIG_X86_CMPXCHG64
unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
{
	u64 prev;
	unsigned long flags;

	/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u64 *)ptr;
	if (prev == old)
		*(u64 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_486_u64);
#endif
// arch_initcall(intel_cpu_init);