#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <mach_apic.h>
#endif

#include "cpu.h"
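/*
 * Per-CPU stack used by the ESPFIX workaround: when returning to user
 * space on a 16-bit stack segment, the kernel switches to this small
 * per-CPU stack so the high bits of %esp are not leaked.
 */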
DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
static int cachesize_override __devinitdata = -1;
static int disable_x86_fxsr __devinitdata = 0;
static int disable_x86_serial_nr __devinitdata = 1;
struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};

extern int disable_pse;
static void default_init(struct cpuinfo_x86 * c)
{
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
}
static struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
};
static struct cpu_dev * this_cpu = &default_cpu;
static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);
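/*
 * CPUID levels 0x80000002..0x80000004 each return 16 bytes of the
 * 48-byte processor brand string in EAX, EBX, ECX and EDX.
 */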
int __devinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while ( *p == ' ' )
		p++;
	if ( p != q ) {
		while ( *p )
			*q++ = *p++;
		while ( q <= &c->x86_model_id[48] )
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}
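/*
 * CPUID 0x80000005 reports the L1 caches (ECX = D-cache, EDX = I-cache;
 * bits 31..24 hold the size in KB, bits 7..0 the line size in bytes).
 * CPUID 0x80000006 reports the L2 cache, with the size in ECX[31:16].
 */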
void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}
/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set the name; */
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */

/* Look up CPU names by table lookup. */
static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info;

	if ( c->x86_model >= 16 )
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}
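/*
 * Match the 12-byte vendor string from CPUID level 0 against the
 * c_ident strings of each registered cpu_dev (some vendors report
 * more than one identification string).
 */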
static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (cpu_devs[i]) {
			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
			    (cpu_devs[i]->c_ident[1] &&
			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
				c->x86_vendor = i;
				if (!early)
					this_cpu = cpu_devs[i];
				return;
			}
		}
	}

	printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
	printk(KERN_ERR "CPU: Your system may be unstable.\n");
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}
static int __init x86_fxsr_setup(char * s)
{
	disable_x86_fxsr = 1;
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);
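/*
 * A flag in EFLAGS is "changeable" if we can flip it, write EFLAGS
 * back, and read the flipped value out again.  Toggling the ID bit
 * (bit 21) this way is the standard test for CPUID support.
 */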
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	asm("pushfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"
	    "movl %0,%1\n\t"
	    "xorl %2,%0\n\t"
	    "pushl %0\n\t"
	    "popfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"
	    "popfl\n\t"
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}
/* Probe for the CPUID instruction */
static int __devinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
/* Do minimum CPU detection early.
   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
   The others are not touched to avoid unwanted side effects.

   WARNING: this function is only called on the BP.  Don't add code here
   that is supposed to run on all CPUs. */
static void __init early_cpu_detect(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	c->x86_cache_alignment = 32;

	if (!have_cpuid_p())
		return;

	/* Get vendor name */
	cpuid(0x00000000, &c->cpuid_level,
	      (int *)&c->x86_vendor_id[0],
	      (int *)&c->x86_vendor_id[8],
	      (int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c, 1);
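	/*
	 * CPUID level 1 returns the processor signature in EAX:
	 * stepping in bits 3..0, model in 7..4, family in 11..8,
	 * extended model in 19..16 and extended family in 27..20.
	 */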
	c->x86 = 4;
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 15;
		c->x86_model = (tfms >> 4) & 15;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		c->x86_mask = tfms & 15;
		if (cap0 & (1<<19))
			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
	}
}
void __devinit generic_identify(struct cpuinfo_x86 * c)
{
	u32 tfms, xlvl;
	unsigned int junk;

	if (have_cpuid_p()) {
		/* Get vendor name */
		cpuid(0x00000000, &c->cpuid_level,
		      (int *)&c->x86_vendor_id[0],
		      (int *)&c->x86_vendor_id[8],
		      (int *)&c->x86_vendor_id[4]);

		get_cpu_vendor(c, 0);
		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */

		/* Intel-defined flags: level 0x00000001 */
		if ( c->cpuid_level >= 0x00000001 ) {
			u32 capability, excap;
			cpuid(0x00000001, &tfms, &junk, &excap, &capability);
			c->x86_capability[0] = capability;
			c->x86_capability[4] = excap;
			c->x86 = (tfms >> 8) & 15;
			c->x86_model = (tfms >> 4) & 15;
			if (c->x86 == 0xf) {
				c->x86 += (tfms >> 20) & 0xff;
				c->x86_model += ((tfms >> 16) & 0xF) << 4;
			}
			c->x86_mask = tfms & 15;
		} else {
			/* Have CPUID level 0 only - unheard of */
			c->x86 = 4;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
			if ( xlvl >= 0x80000001 ) {
				c->x86_capability[1] = cpuid_edx(0x80000001);
				c->x86_capability[6] = cpuid_ecx(0x80000001);
			}
			if ( xlvl >= 0x80000004 )
				get_model_name(c); /* Default name */
		}
	}

	early_intel_workaround(c);

#ifdef CONFIG_X86_HT
	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
		/* Disable processor serial number */
		unsigned long lo, hi;
		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_bit(X86_FEATURE_PN, c->x86_capability);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}
static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __devinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);
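	/*
	 * The AC bit (bit 18) in EFLAGS only exists on the 486 and
	 * later, so if it can be toggled the CPU is at least a 486.
	 */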
	if (!have_cpuid_p()) {
		/* First of all, decide if this is a 486 or higher */
		/* It's a 486 if we can modify the AC flag */
		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

	printk(KERN_DEBUG "CPU: After generic identify, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");
	if (this_cpu->c_identify) {
		this_cpu->c_identify(c);

		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
		for (i = 0; i < NCAPINTS; i++)
			printk(" %08lx", c->x86_capability[i]);
		printk("\n");
	}
	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);
	/*
	 * The vendor-specific functions might have changed features.  Now
	 * we do "generic changes."
	 */

	/* TSC disabled? */
	if (tsc_disable)
		clear_bit(X86_FEATURE_TSC, c->x86_capability);

	/* FXSR disabled? */
	if (disable_x86_fxsr) {
		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
		clear_bit(X86_FEATURE_XMM, c->x86_capability);
	}

	if (disable_pse)
		clear_bit(X86_FEATURE_PSE, c->x86_capability);
	/* If the model name is still unset, do table lookup. */
	if ( !c->x86_model_id[0] ) {
		char *p;
		p = table_lookup_model(c);
		if ( p )
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86_vendor, c->x86_model);
	}
	/* Now the feature flags better reflect actual CPU features! */

	printk(KERN_DEBUG "CPU: After all inits, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");
	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if ( c != &boot_cpu_data ) {
		/* AND the already accumulated flags with these */
		for ( i = 0 ; i < NCAPINTS ; i++ )
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}
	/* Init Machine Check Exception if available. */
	mcheck_init(c);

	if (c == &boot_cpu_data)
		sysenter_setup();
	enable_sep_cpu();

	if (c == &boot_cpu_data)
		mtrr_bp_init();
	else
		mtrr_ap_init();
}
#ifdef CONFIG_X86_HT
void __devinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	int cpu = smp_processor_id();

	cpuid(1, &eax, &ebx, &ecx, &edx);

	c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);

	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
			       smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}
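		/*
		 * The initial APIC ID is laid out as bit fields,
		 * [package | core | thread]: shifting it right by the
		 * width of the thread field (index_msb) and masking
		 * with the core-field width (core_bits) extracts the
		 * core number; shifting by the combined width yields
		 * the physical package ID.
		 */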
		index_msb = get_count_order(smp_num_siblings);
		phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       phys_proc_id[cpu]);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
					       ((1 << core_bits) - 1);

		if (c->x86_max_cores > 1)
			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
			       cpu_core_id[cpu]);
	}
}
#endif
void __devinit print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	else
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}
cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE;
/*
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */
extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);
void __init early_cpu_init(void)
{
	intel_cpu_init();
	cyrix_init_cpu();
	nsc_init_cpu();
	amd_init_cpu();
	centaur_init_cpu();
	transmeta_init_cpu();
	rise_init_cpu();
	nexgen_init_cpu();
	umc_init_cpu();
	early_cpu_detect();

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* pse is not compatible with on-the-fly unmapping,
	 * disable it even if the cpus claim to support it.
	 */
	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
	disable_pse = 1;
#endif
}
/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __devinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct tss_struct * t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &current->thread;
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);

	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
	if (tsc_disable && cpu_has_tsc) {
		printk(KERN_NOTICE "Disabling TSC...\n");
		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		set_in_cr4(X86_CR4_TSD);
	}
	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	memcpy(gdt, cpu_gdt_table, GDT_SIZE);
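	/*
	 * A segment descriptor scatters its base address across the
	 * 8-byte entry: base bits 0..23 live in descriptor bits 16..39
	 * and base bits 24..31 in descriptor bits 56..63, hence the two
	 * shifted masks below.  The low bits hold the segment limit,
	 * CPU_16BIT_STACK_SIZE - 1.
	 */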
	/* Set up GDT entry for 16bit stack */
	*(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
		((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
		((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
		(CPU_16BIT_STACK_SIZE - 1);
	cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
	cpu_gdt_descr[cpu].address = (unsigned long)gdt;

	load_gdt(&cpu_gdt_descr[cpu]);
	load_idt(&idt_descr);
	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	load_esp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);
#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif
	/* Clear %fs and %gs. */
	asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
	/* Clear all 6 debug registers (DR4 and DR5 do not exist): */
	set_debugreg(0, 0);
	set_debugreg(0, 1);
	set_debugreg(0, 2);
	set_debugreg(0, 3);
	set_debugreg(0, 6);
	set_debugreg(0, 7);
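	/*
	 * Clearing the thread status flags and the used-math state
	 * below makes the first FP instruction fault, so the FPU is
	 * (re)initialized on demand.
	 */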
	/*
	 * Force FPU initialization:
	 */
	current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();
}
#ifdef CONFIG_HOTPLUG_CPU
void __devinit cpu_uninit(void)
{
	int cpu = raw_smp_processor_id();
	cpu_clear(cpu, cpu_initialized);

	/* lazy TLB state */
	per_cpu(cpu_tlbstate, cpu).state = 0;
	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif