[PATCH] i386 cpu hotplug: don't access freed memory
[linux-2.6-openrd.git] arch/i386/kernel/cpu/common.c
#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

#include "cpu.h"
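/*
 * Per-CPU 16-bit stack, used by the ESPFIX path when returning to user
 * space on a 16-bit stack segment (iret does not restore the high bits
 * of ESP in that case).  Explanatory note added here; see entry.S for
 * the actual user of this stack.
 */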
DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);

static int cachesize_override __devinitdata = -1;
static int disable_x86_fxsr __devinitdata = 0;
static int disable_x86_serial_nr __devinitdata = 1;

struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};

extern int disable_pse;
static void default_init(struct cpuinfo_x86 * c)
{
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
}

static struct cpu_dev default_cpu = {
	.c_init	= default_init,
	.c_vendor = "Unknown",
};
static struct cpu_dev * this_cpu = &default_cpu;
static int __init cachesize_setup(char *str)
{
	get_option (&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);
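/*
 * Illustrative usage (not from this file): booting with "cachesize=512"
 * on the kernel command line forces the reported L2 size to 512K in
 * display_cacheinfo() below, overriding whatever was detected.
 */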
int __devinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while ( *p == ' ' )
		p++;
	if ( p != q ) {
		while ( *p )
			*q++ = *p++;
		while ( q <= &c->x86_model_id[48] )
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}
void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c,l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if ( l2size == 0 )
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}
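/*
 * Illustrative boot-log output from the printks above (values vary by
 * part; shown only as an example):
 *
 *   CPU: L1 I Cache: 64K (64 bytes/line), D cache 64K (64 bytes/line)
 *   CPU: L2 Cache: 512K (64 bytes/line)
 */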
/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set the model name;
   in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */

/* Look up CPU names by table lookup. */
static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info;

	if ( c->x86_model >= 16 )
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}
static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
	char *v = c->x86_vendor_id;
	int i;
	static int printed;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (cpu_devs[i]) {
			if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
			    (cpu_devs[i]->c_ident[1] &&
			     !strcmp(v,cpu_devs[i]->c_ident[1]))) {
				c->x86_vendor = i;
				if (!early)
					this_cpu = cpu_devs[i];
				return;
			}
		}
	}
	if (!printed) {
		printed++;
		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}
static int __init x86_fxsr_setup(char * s)
{
	disable_x86_fxsr = 1;
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);
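/*
 * Note: "nofxsr" also ends up clearing X86_FEATURE_XMM in identify_cpu()
 * below, since SSE depends on FXSAVE/FXRSTOR support.
 */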
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	asm("pushfl\n\t"		/* save EFLAGS */
	    "pushfl\n\t"		/* push a working copy */
	    "popl %0\n\t"		/* ... into f1 */
	    "movl %0,%1\n\t"		/* remember the original in f2 */
	    "xorl %2,%0\n\t"		/* toggle the flag under test */
	    "pushl %0\n\t"
	    "popfl\n\t"			/* try to load it into EFLAGS */
	    "pushfl\n\t"
	    "popl %0\n\t"		/* read EFLAGS back into f1 */
	    "popfl\n\t"			/* restore the saved EFLAGS */
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}
/* Probe for the CPUID instruction */
static int __devinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
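/*
 * A 386 cannot toggle EFLAGS.ID, so this returns false there;
 * identify_cpu() then falls back to the AC-flag test to tell a 386
 * from a 486.
 */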
/* Do minimum CPU detection early.
   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
   The others are not touched to avoid unwanted side effects.

   WARNING: this function is only called on the BP.  Don't add code here
   that is supposed to run on all CPUs. */
static void __init early_cpu_detect(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	c->x86_cache_alignment = 32;

	if (!have_cpuid_p())
		return;

	/* Get vendor name */
	cpuid(0x00000000, &c->cpuid_level,
	      (int *)&c->x86_vendor_id[0],
	      (int *)&c->x86_vendor_id[8],
	      (int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c, 1);

	c->x86 = 4;
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
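		/*
		 * Leaf 1 EAX (tfms): stepping in bits 3-0, model in bits
		 * 7-4, family in bits 11-8, extended model in bits 19-16,
		 * extended family in bits 27-20.  E.g. tfms = 0x00000f29
		 * (a Pentium 4) decodes to family 0xf, model 2, stepping 9.
		 */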
		c->x86 = (tfms >> 8) & 15;
		c->x86_model = (tfms >> 4) & 15;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		c->x86_mask = tfms & 15;
		if (cap0 & (1<<19))	/* CLFLUSH supported */
			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
	}
}
void __devinit generic_identify(struct cpuinfo_x86 * c)
{
	u32 tfms, xlvl;
	int junk;

	if (have_cpuid_p()) {
		/* Get vendor name */
		cpuid(0x00000000, &c->cpuid_level,
		      (int *)&c->x86_vendor_id[0],
		      (int *)&c->x86_vendor_id[8],
		      (int *)&c->x86_vendor_id[4]);

		get_cpu_vendor(c, 0);
		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */

		/* Intel-defined flags: level 0x00000001 */
		if ( c->cpuid_level >= 0x00000001 ) {
			u32 capability, excap;
			cpuid(0x00000001, &tfms, &junk, &excap, &capability);
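			/*
			 * Capability words: word 0 takes the leaf 1 EDX
			 * flags and word 4 the leaf 1 ECX flags; words 1
			 * and 6 are filled from extended leaf 0x80000001
			 * further down.
			 */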
			c->x86_capability[0] = capability;
			c->x86_capability[4] = excap;
			c->x86 = (tfms >> 8) & 15;
			c->x86_model = (tfms >> 4) & 15;
			if (c->x86 == 0xf) {
				c->x86 += (tfms >> 20) & 0xff;
				c->x86_model += ((tfms >> 16) & 0xF) << 4;
			}
			c->x86_mask = tfms & 15;
		} else {
			/* Have CPUID level 0 only - unheard of */
			c->x86 = 4;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
			if ( xlvl >= 0x80000001 ) {
				c->x86_capability[1] = cpuid_edx(0x80000001);
				c->x86_capability[6] = cpuid_ecx(0x80000001);
			}
			if ( xlvl >= 0x80000004 )
				get_model_name(c); /* Default name */
		}
	}

	early_intel_workaround(c);

#ifdef CONFIG_X86_HT
	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
		/* Disable processor serial number */
		unsigned long lo,hi;
		rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
		lo |= 0x200000;		/* bit 21: PSN disable */
		wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_bit(X86_FEATURE_PN, c->x86_capability);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}
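/*
 * Boot with "serialnumber" to keep the PSN enabled; it is disabled
 * by default above.
 */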
static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __devinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/* First of all, decide if this is a 486 or higher */
		/* It's a 486 if we can modify the AC flag */
		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

	printk(KERN_DEBUG "CPU: After generic identify, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	if (this_cpu->c_identify) {
		this_cpu->c_identify(c);

		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
		for (i = 0; i < NCAPINTS; i++)
			printk(" %08lx", c->x86_capability[i]);
		printk("\n");
	}

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.  Now
	 * we do "generic changes."
	 */

	/* TSC disabled? */
	if ( tsc_disable )
		clear_bit(X86_FEATURE_TSC, c->x86_capability);

	/* FXSR disabled? */
	if (disable_x86_fxsr) {
		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
		clear_bit(X86_FEATURE_XMM, c->x86_capability);
	}

	if (disable_pse)
		clear_bit(X86_FEATURE_PSE, c->x86_capability);

	/* If the model name is still unset, do table lookup. */
	if ( !c->x86_model_id[0] ) {
		char *p;
		p = table_lookup_model(c);
		if ( p )
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86_vendor, c->x86_model);
	}

	/* Now the feature flags better reflect actual CPU features! */

	printk(KERN_DEBUG "CPU: After all inits, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if ( c != &boot_cpu_data ) {
		/* AND the already accumulated flags with these */
		for ( i = 0 ; i < NCAPINTS ; i++ )
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_init(c);

	if (c == &boot_cpu_data)
		sysenter_setup();
	enable_sep_cpu();

	if (c == &boot_cpu_data)
		mtrr_bp_init();
	else
		mtrr_ap_init();
}
#ifdef CONFIG_X86_HT
void __devinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	int cpu = smp_processor_id();

	cpuid(1, &eax, &ebx, &ecx, &edx);

	c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);

	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1 ) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d\n", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

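		/*
		 * Worked example (with the default APIC model): a one-core
		 * HT Pentium 4 reports two logical siblings, so index_msb
		 * is 1, the physical package ID is the APIC ID shifted
		 * right by one, and each core ID comes out as 0.
		 */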
		index_msb = get_count_order(smp_num_siblings);
		phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       phys_proc_id[cpu]);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
					       ((1 << core_bits) - 1);

		if (c->x86_max_cores > 1)
			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
			       cpu_core_id[cpu]);
	}
}
#endif
void __devinit print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	else
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}
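/*
 * Note the __devinitdata annotation (rather than __initdata): when
 * hotplug support is configured in, this section is not discarded
 * after boot, so cpu_init() on a CPU brought up later still reads
 * valid memory -- which appears to be the point of this patch's
 * "don't access freed memory" fix.
 */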
cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE;
/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */
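/*
 * A rough sketch of that future shape (hypothetical names, assuming
 * plain initcall-based registration):
 *
 *	static int __init intel_cpu_init(void)
 *	{
 *		cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
 *		return 0;
 *	}
 *	arch_initcall(intel_cpu_init);
 */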
extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);
void __init early_cpu_init(void)
{
	intel_cpu_init();
	cyrix_init_cpu();
	nsc_init_cpu();
	amd_init_cpu();
	centaur_init_cpu();
	transmeta_init_cpu();
	rise_init_cpu();
	nexgen_init_cpu();
	umc_init_cpu();
	early_cpu_detect();

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* pse is not compatible with on-the-fly unmapping,
	 * disable it even if the cpus claim to support it.
	 */
	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
	disable_pse = 1;
#endif
}
/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __devinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct tss_struct * t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &current->thread;
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);

	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
	if (tsc_disable && cpu_has_tsc) {
		printk(KERN_NOTICE "Disabling TSC...\n");
		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		set_in_cr4(X86_CR4_TSD);
	}

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	memcpy(gdt, cpu_gdt_table, GDT_SIZE);
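	/*
	 * Descriptor layout refresher: a GDT entry keeps base bits 0-23
	 * in bits 16-39 and base bits 24-31 in bits 56-63, with the
	 * segment limit in bits 0-15.  The OR below scatters stk16_off
	 * accordingly and sets the limit to CPU_16BIT_STACK_SIZE - 1.
	 */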
	/* Set up GDT entry for 16bit stack */
	*(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
		((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
		((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
		(CPU_16BIT_STACK_SIZE - 1);

	cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
	cpu_gdt_descr[cpu].address = (unsigned long)gdt;

	load_gdt(&cpu_gdt_descr[cpu]);
	load_idt(&idt_descr);
	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	load_esp0(t, thread);
	set_tss_desc(cpu,t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	/* Clear %fs and %gs. */
	asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");

	/* Clear all 6 debug registers: */
	set_debugreg(0, 0);
	set_debugreg(0, 1);
	set_debugreg(0, 2);
	set_debugreg(0, 3);
	set_debugreg(0, 6);
	set_debugreg(0, 7);

	/*
	 * Force FPU initialization:
	 */
	current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();
}
#ifdef CONFIG_HOTPLUG_CPU
void __devinit cpu_uninit(void)
{
	int cpu = raw_smp_processor_id();
	cpu_clear(cpu, cpu_initialized);

	/* lazy TLB state */
	per_cpu(cpu_tlbstate, cpu).state = 0;
	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif