Peter Anvin: more P4 configuration parsing
[davej-history.git] arch/i386/kernel/setup.c
/*
 *  linux/arch/i386/kernel/setup.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Enhanced CPU type detection by Mike Jagdis, Patrick St. Jean
 *  and Martin Mares, November 1997.
 *
 *  Force Cyrix 6x86(MX) and M II processors to report MTRR capability
 *  and Cyrix "coma bug" recognition by
 *      Zoltán Böszörményi <zboszor@mail.externet.hu> February 1999.
 *
 *  Force Centaur C6 processors to report MTRR capability.
 *      Bart Hartgers <bart@etpmod.phys.tue.nl>, May 1999.
 *
 *  Intel Mobile Pentium II detection fix. Sean Gilley, June 1999.
 *
 *  IDT Winchip tweaks, misc clean ups.
 *      Dave Jones <davej@suse.de>, August 1999
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *
 *  Better detection of Centaur/IDT WinChip models.
 *      Bart Hartgers <bart@etpmod.phys.tue.nl>, August 1999.
 *
 *  Memory region support
 *      David Parsons <orc@pell.chi.il.us>, July-August 1999
 *
 *  Cleaned up cache-detection code
 *      Dave Jones <davej@suse.de>, October 1999
 *
 *  Added proper L2 cache detection for Coppermine
 *      Dragan Stancevic <visitor@valinux.com>, October 1999
 *
 *  Added the original array for capability flags but forgot to credit
 *  myself :) (~1998) Fixed/cleaned up some cpu_model_info and other stuff
 *      Jauder Ho <jauderho@carumba.com>, January 2000
 *
 *  Detection for Celeron coppermine, identify_cpu() overhauled,
 *  and a few other clean ups.
 *      Dave Jones <davej@suse.de>, April 2000
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  Added proper Cascades CPU and L2 cache detection for Cascades
 *  and 8-way type cache happy bunch from Intel:^)
 *      Dragan Stancevic <visitor@valinux.com>, May 2000
 *
 *  Forward port AMD Duron errata T13 from 2.2.17pre
 *      Dave Jones <davej@suse.de>, August 2000
 *
 *  Forward port lots of fixes/improvements from 2.2.18pre
 *  Cyrix III, Pentium IV support.
 *      Dave Jones <davej@suse.de>, October 2000
 *
 *  Massive cleanup of CPU detection and bug handling;
 *  Transmeta CPU detection,
 *      H. Peter Anvin <hpa@zytor.com>, November 2000
 *
 *  This file handles the architecture-dependent parts of initialization
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/malloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/apm_bios.h>
#ifdef CONFIG_BLK_DEV_RAM
#include <linux/blk.h>
#endif
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cobalt.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
/*
 * Machine setup..
 */

char ignore_irq13;		/* set if exception 16 works */
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };

unsigned long mmu_cr4_features;

/*
 * Bus types ..
 */
#ifdef CONFIG_EISA
int EISA_bus;
#endif
int MCA_bus;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;
unsigned int mca_pentium_flag;
/*
 * Setup options
 */
struct drive_info_struct { char dummy[32]; } drive_info;
struct screen_info screen_info;
struct apm_bios_info apm_bios_info;
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct e820map e820;

unsigned char aux_device_present;

#ifdef CONFIG_BLK_DEV_RAM
extern int rd_doload;		/* 1 = load ramdisk, 0 = don't load */
extern int rd_prompt;		/* 1 = prompt for ramdisk, 0 = don't prompt */
extern int rd_image_start;	/* starting block # of image */
#endif

extern int root_mountflags;
extern char _text, _etext, _edata, _end;
extern unsigned long cpu_khz;

static int disable_x86_serial_nr __initdata = 1;
/*
 * This is set up by the setup-routine at boot-time
 */
#define PARAM	((unsigned char *)empty_zero_page)
#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
#define E820_MAP_NR (*(char*) (PARAM+E820NR))
#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
#define INITRD_START (*(unsigned long *) (PARAM+0x218))
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c))
#define COMMAND_LINE ((char *) (PARAM+2048))
#define COMMAND_LINE_SIZE 256

#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000
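/*
 * Example: a RAMDISK_FLAGS word of 0xC001 decodes as "prompt for the
 * ramdisk" (bit 15) and "load the ramdisk" (bit 14), with the image
 * starting at block 1 (the low 11 bits).
 */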
#ifdef CONFIG_VISWS
char visws_board_type = -1;
char visws_board_rev = -1;

#define	PIIX_PM_START		0x0F80

#define	SIO_GPIO_START		0x0FC0

#define	SIO_PM_START		0x0FC8

#define	PMBASE			PIIX_PM_START
#define	GPIREG0			(PMBASE+0x30)
#define	GPIREG(x)		(GPIREG0+((x)/8))
#define	PIIX_GPI_BD_ID1		18
#define	PIIX_GPI_BD_REG		GPIREG(PIIX_GPI_BD_ID1)

#define	PIIX_GPI_BD_SHIFT	(PIIX_GPI_BD_ID1 % 8)

#define	SIO_INDEX	0x2e
#define	SIO_DATA	0x2f

#define	SIO_DEV_SEL	0x7
#define	SIO_DEV_ENB	0x30
#define	SIO_DEV_MSB	0x60
#define	SIO_DEV_LSB	0x61

#define	SIO_GP_DEV	0x7

#define	SIO_GP_BASE	SIO_GPIO_START
#define	SIO_GP_MSB	(SIO_GP_BASE>>8)
#define	SIO_GP_LSB	(SIO_GP_BASE&0xff)

#define	SIO_GP_DATA1	(SIO_GP_BASE+0)

#define	SIO_PM_DEV	0x8

#define	SIO_PM_BASE	SIO_PM_START
#define	SIO_PM_MSB	(SIO_PM_BASE>>8)
#define	SIO_PM_LSB	(SIO_PM_BASE&0xff)
#define	SIO_PM_INDEX	(SIO_PM_BASE+0)
#define	SIO_PM_DATA	(SIO_PM_BASE+1)

#define	SIO_PM_FER2	0x1

#define	SIO_PM_GP_EN	0x80
static void
visws_get_board_type_and_rev(void)
{
	int raw;

	visws_board_type = (char)(inb_p(PIIX_GPI_BD_REG) & PIIX_GPI_BD_REG)
							 >> PIIX_GPI_BD_SHIFT;
	/*
	 * Get Board rev.
	 * First, we have to initialize the 307 part to allow us access
	 * to the GPIO registers.  Let's map them at 0x0fc0 which is right
	 * after the PIIX4 PM section.
	 */
	outb_p(SIO_DEV_SEL, SIO_INDEX);
	outb_p(SIO_GP_DEV, SIO_DATA);	/* Talk to GPIO regs. */

	outb_p(SIO_DEV_MSB, SIO_INDEX);
	outb_p(SIO_GP_MSB, SIO_DATA);	/* MSB of GPIO base address */

	outb_p(SIO_DEV_LSB, SIO_INDEX);
	outb_p(SIO_GP_LSB, SIO_DATA);	/* LSB of GPIO base address */

	outb_p(SIO_DEV_ENB, SIO_INDEX);
	outb_p(1, SIO_DATA);		/* Enable GPIO registers. */

	/*
	 * Now, we have to map the power management section to write
	 * a bit which enables access to the GPIO registers.
	 * What lunatic came up with this shit?
	 */
	outb_p(SIO_DEV_SEL, SIO_INDEX);
	outb_p(SIO_PM_DEV, SIO_DATA);	/* Talk to GPIO regs. */

	outb_p(SIO_DEV_MSB, SIO_INDEX);
	outb_p(SIO_PM_MSB, SIO_DATA);	/* MSB of PM base address */

	outb_p(SIO_DEV_LSB, SIO_INDEX);
	outb_p(SIO_PM_LSB, SIO_DATA);	/* LSB of PM base address */

	outb_p(SIO_DEV_ENB, SIO_INDEX);
	outb_p(1, SIO_DATA);		/* Enable PM registers. */

	/*
	 * Now, write the PM register which enables the GPIO registers.
	 */
	outb_p(SIO_PM_FER2, SIO_PM_INDEX);
	outb_p(SIO_PM_GP_EN, SIO_PM_DATA);

	/*
	 * Now, initialize the GPIO registers.
	 * We want them all to be inputs which is the
	 * power on default, so let's leave them alone.
	 * So, let's just read the board rev!
	 */
	raw = inb_p(SIO_GP_DATA1);
	raw &= 0x7f;	/* 7 bits of valid board revision ID. */

	if (visws_board_type == VISWS_320) {
		if (raw < 0x6) {
			visws_board_rev = 4;
		} else if (raw < 0xc) {
			visws_board_rev = 5;
		} else {
			visws_board_rev = 6;
		}
	} else if (visws_board_type == VISWS_540) {
		visws_board_rev = 2;
	} else {
		visws_board_rev = raw;
	}

	printk("Silicon Graphics %s (rev %d)\n",
	       visws_board_type == VISWS_320 ? "320" :
	       (visws_board_type == VISWS_540 ? "540" :
	        "unknown"),
	       visws_board_rev);
}
#endif
static char command_line[COMMAND_LINE_SIZE];
       char saved_command_line[COMMAND_LINE_SIZE];

struct resource standard_io_resources[] = {
	{ "dma1", 0x00, 0x1f, IORESOURCE_BUSY },
	{ "pic1", 0x20, 0x3f, IORESOURCE_BUSY },
	{ "timer", 0x40, 0x5f, IORESOURCE_BUSY },
	{ "keyboard", 0x60, 0x6f, IORESOURCE_BUSY },
	{ "dma page reg", 0x80, 0x8f, IORESOURCE_BUSY },
	{ "pic2", 0xa0, 0xbf, IORESOURCE_BUSY },
	{ "dma2", 0xc0, 0xdf, IORESOURCE_BUSY },
	{ "fpu", 0xf0, 0xff, IORESOURCE_BUSY }
};

#define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource))

static struct resource code_resource = { "Kernel code", 0x100000, 0 };
static struct resource data_resource = { "Kernel data", 0, 0 };
static struct resource vram_resource = { "Video RAM area", 0xa0000, 0xbffff, IORESOURCE_BUSY };

/* System ROM resources */
#define MAXROMS 6
static struct resource rom_resources[MAXROMS] = {
	{ "System ROM", 0xF0000, 0xFFFFF, IORESOURCE_BUSY },
	{ "Video ROM", 0xc0000, 0xc7fff, IORESOURCE_BUSY }
};

#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
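/* 0xaa55 is the standard option-ROM signature read little-endian:
   byte 0x55 at offset 0 followed by 0xAA at offset 1. */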
static void __init probe_roms(void)
{
	int roms = 1;
	unsigned long base;
	unsigned char *romstart;

	request_resource(&iomem_resource, rom_resources+0);

	/* Video ROM is standard at C000:0000 - C7FF:0000, check signature */
	for (base = 0xC0000; base < 0xE0000; base += 2048) {
		romstart = bus_to_virt(base);
		if (!romsignature(romstart))
			continue;
		request_resource(&iomem_resource, rom_resources + roms);
		roms++;
		break;
	}

	/* Extension roms at C800:0000 - DFFF:0000 */
	for (base = 0xC8000; base < 0xE0000; base += 2048) {
		unsigned long length;

		romstart = bus_to_virt(base);
		if (!romsignature(romstart))
			continue;
		length = romstart[2] * 512;
		if (length) {
			unsigned int i;
			unsigned char chksum;

			chksum = 0;
			for (i = 0; i < length; i++)
				chksum += romstart[i];

			/* Good checksum? */
			if (!chksum) {
				rom_resources[roms].start = base;
				rom_resources[roms].end = base + length - 1;
				rom_resources[roms].name = "Extension ROM";
				rom_resources[roms].flags = IORESOURCE_BUSY;

				request_resource(&iomem_resource, rom_resources + roms);
				roms++;
				if (roms >= MAXROMS)
					return;
			}
		}
	}

	/* Final check for motherboard extension rom at E000:0000 */
	base = 0xE0000;
	romstart = bus_to_virt(base);

	if (romsignature(romstart)) {
		rom_resources[roms].start = base;
		rom_resources[roms].end = base + 65535;
		rom_resources[roms].name = "Extension ROM";
		rom_resources[roms].flags = IORESOURCE_BUSY;

		request_resource(&iomem_resource, rom_resources + roms);
	}
}
void __init add_memory_region(unsigned long long start,
			      unsigned long long size, int type)
{
	int x = e820.nr_map;

	if (x == E820MAX) {
		printk("Ooops! Too many entries in the memory map!\n");
		return;
	}

	e820.map[x].addr = start;
	e820.map[x].size = size;
	e820.map[x].type = type;
	e820.nr_map++;
} /* add_memory_region */
#define E820_DEBUG	1

static void __init print_memory_map(char *who)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		printk(" %s: %016Lx @ %016Lx ", who,
			e820.map[i].size, e820.map[i].addr);
		switch (e820.map[i].type) {
		case E820_RAM:	printk("(usable)\n");
				break;
		case E820_RESERVED:
				printk("(reserved)\n");
				break;
		case E820_ACPI:
				printk("(ACPI data)\n");
				break;
		case E820_NVS:
				printk("(ACPI NVS)\n");
				break;
		default:	printk("type %lu\n", e820.map[i].type);
				break;
		}
	}
}
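/*
 * With the format string above, a typical line looks like:
 *   BIOS-e820: 000000000009f000 @ 0000000000000000 (usable)
 * i.e. the region size followed by its start address.
 */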
/*
 * Copy the BIOS e820 map into a safe place.
 *
 * Sanity-check it while we're at it..
 *
 * If we're lucky and live on a modern system, the setup code
 * will have given us a memory map that we can use to properly
 * set up memory.  If we aren't, we'll fake a memory map.
 *
 * We check to see that the memory map contains at least 2 elements
 * before we'll use it, because the detection code in setup.S may
 * not be perfect and most every PC known to man has two memory
 * regions: one from 0 to 640k, and one from 1mb up. (The IBM
 * thinkpad 560x, for example, does not cooperate with the memory
 * detection code.)
 */
static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
{
	/* Only one memory region (or negative)? Ignore it */
	if (nr_map < 2)
		return -1;

	do {
		unsigned long long start = biosmap->addr;
		unsigned long long size = biosmap->size;
		unsigned long long end = start + size;
		unsigned long type = biosmap->type;

		/* Overflow in 64 bits? Ignore the memory map. */
		if (start > end)
			return -1;

		/*
		 * Some BIOSes claim RAM in the 640k - 1M region.
		 * Not right. Fix it up.
		 */
		if (type == E820_RAM) {
			if (start < 0x100000ULL && end > 0xA0000ULL) {
				if (start < 0xA0000ULL)
					add_memory_region(start, 0xA0000ULL-start, type);
				if (end < 0x100000ULL)
					continue;
				start = 0x100000ULL;
				size = end - start;
			}
		}
		add_memory_region(start, size, type);
	} while (biosmap++,--nr_map);
	return 0;
}
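/*
 * Worked example of the 640k-1M clipping above: a BIOS entry claiming
 * RAM from 0x90000 to 0x100fff is split into 0x90000-0x9ffff plus
 * 0x100000-0x100fff, so the 0xa0000-0xfffff hole is never treated as
 * usable RAM.
 */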
/*
 * Do NOT EVER look at the BIOS memory size location.
 * It does not work on many machines.
 */
#define LOWMEMSIZE()	(0x9f000)

void __init setup_memory_region(void)
{
	char *who = "BIOS-e820";

	/*
	 * Try to copy the BIOS-supplied E820-map.
	 *
	 * Otherwise fake a memory map; one section from 0k->640k,
	 * the next section from 1mb->appropriate_mem_k
	 */
	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
		unsigned long mem_size;

		/* compare results from other methods and take the greater */
		if (ALT_MEM_K < EXT_MEM_K) {
			mem_size = EXT_MEM_K;
			who = "BIOS-88";
		} else {
			mem_size = ALT_MEM_K;
			who = "BIOS-e801";
		}

		e820.nr_map = 0;
		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
	}
	printk("BIOS-provided physical RAM map:\n");
	print_memory_map(who);
} /* setup_memory_region */
static inline void parse_mem_cmdline (char ** cmdline_p)
{
	char c = ' ', *to = command_line, *from = COMMAND_LINE;
	int len = 0;
	int usermem = 0;

	/* Save unparsed command line copy for /proc/cmdline */
	memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';

	for (;;) {
		/*
		 * "mem=nopentium" disables the 4MB page tables.
		 * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
		 * to <mem>, overriding the bios size.
		 * "mem=XXX[KkmM]@XXX[KkmM]" defines a memory region from
		 * <start> to <start>+<mem>, overriding the bios size.
		 */
		if (c == ' ' && !memcmp(from, "mem=", 4)) {
			if (to != command_line)
				to--;
			if (!memcmp(from+4, "nopentium", 9)) {
				from += 9+4;
				clear_bit(X86_FEATURE_PSE, &boot_cpu_data.x86_capability);
			} else if (!memcmp(from+4, "exactmap", 8)) {
				from += 8+4;
				e820.nr_map = 0;
				usermem = 1;
			} else {
				/* If the user specifies memory size, we
				 * blow away any automatically generated
				 * size
				 */
				unsigned long start_at, mem_size;

				if (usermem == 0) {
					/* first time in: zap the whitelist
					 * and reinitialize it with the
					 * standard low-memory region.
					 */
					e820.nr_map = 0;
					usermem = 1;
					add_memory_region(0, LOWMEMSIZE(), E820_RAM);
				}
				mem_size = memparse(from+4, &from);
				if (*from == '@')
					start_at = memparse(from+1, &from);
				else {
					start_at = HIGH_MEMORY;
					mem_size -= HIGH_MEMORY;
					usermem=0;
				}
				add_memory_region(start_at, mem_size, E820_RAM);
			}
		}
		c = *(from++);
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*(to++) = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
	if (usermem) {
		printk("user-defined physical RAM map:\n");
		print_memory_map("user");
	}
}
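/*
 * Examples of the syntax parsed above:
 *   mem=96M                   - 96MB total: the low 640k plus 1MB-96MB
 *   mem=exactmap mem=32M@16M  - exactly one user-defined region
 *   mem=nopentium             - clear PSE, disabling 4MB pages
 */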
void __init setup_arch(char **cmdline_p)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, max_pfn, max_low_pfn;
	int i;

#ifdef CONFIG_VISWS
	visws_get_board_type_and_rev();
#endif

	ROOT_DEV = to_kdev_t(ORIG_ROOT_DEV);
	drive_info = DRIVE_INFO;
	screen_info = SCREEN_INFO;
	apm_bios_info = APM_BIOS_INFO;
	if( SYS_DESC_TABLE.length != 0 ) {
		MCA_bus = SYS_DESC_TABLE.table[3] & 0x2;
		machine_id = SYS_DESC_TABLE.table[0];
		machine_submodel_id = SYS_DESC_TABLE.table[1];
		BIOS_revision = SYS_DESC_TABLE.table[2];
	}
	aux_device_present = AUX_DEVICE_INFO;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_bus(&_text);
	code_resource.end = virt_to_bus(&_etext)-1;
	data_resource.start = virt_to_bus(&_etext);
	data_resource.end = virt_to_bus(&_edata)-1;

	parse_mem_cmdline(cmdline_p);

#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)
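/* With 4K pages (PAGE_SHIFT == 12): PFN_UP(0x1fff) == 2,
   PFN_DOWN(0x1fff) == 1, and PFN_PHYS(2) == 0x2000. */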
/*
 * 128MB for vmalloc and initrd
 */
#define VMALLOC_RESERVE	(unsigned long)(128 << 20)
#define MAXMEM		(unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE)
#define MAXMEM_PFN	PFN_DOWN(MAXMEM)
#define MAX_NONPAE_PFN	(1 << 20)
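/*
 * With the usual i386 PAGE_OFFSET of 0xC0000000, MAXMEM works out to
 * 4GB - 3GB - 128MB = 896MB of directly mappable low memory, and
 * MAX_NONPAE_PFN marks the 4GB boundary in page frames.
 */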
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(&_end));

	/*
	 * Find the highest page frame number we have available
	 */
	max_pfn = 0;
	for (i = 0; i < e820.nr_map; i++) {
		unsigned long start, end;
		/* RAM? */
		if (e820.map[i].type != E820_RAM)
			continue;
		start = PFN_UP(e820.map[i].addr);
		end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
		if (start >= end)
			continue;
		if (end > max_pfn)
			max_pfn = end;
	}

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn;
	if (max_low_pfn > MAXMEM_PFN) {
		max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
		/* Maximum memory usable is what is directly addressable */
		printk(KERN_WARNING "Warning only %ldMB will be used.\n",
			MAXMEM>>20);
		if (max_pfn > MAX_NONPAE_PFN)
			printk(KERN_WARNING "Use a PAE enabled kernel.\n");
		else
			printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_X86_PAE
		if (max_pfn > MAX_NONPAE_PFN) {
			max_pfn = MAX_NONPAE_PFN;
			printk(KERN_WARNING "Warning only 4GB will be used.\n");
			printk(KERN_WARNING "Use a PAE enabled kernel.\n");
		}
#endif /* !CONFIG_X86_PAE */
#endif /* !CONFIG_HIGHMEM */
	}

#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > MAXMEM_PFN) {
		highstart_pfn = MAXMEM_PFN;
		printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
			pages_to_mb(highend_pfn - highstart_pfn));
	}
#endif

	/*
	 * Initialize the boot-time allocator (with low memory only):
	 */
	bootmap_size = init_bootmem(start_pfn, max_low_pfn);

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < e820.nr_map; i++) {
		unsigned long curr_pfn, last_pfn, size;
		/*
		 * Reserve usable low memory
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(e820.map[i].addr);
		if (curr_pfn >= max_low_pfn)
			continue;
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);

		if (last_pfn > max_low_pfn)
			last_pfn = max_low_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
	}

	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
		bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem(0, PAGE_SIZE);
#ifdef CONFIG_SMP
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
	smp_alloc_memory(); /* AP processor realmode stacks in low memory */
#endif

#ifdef CONFIG_X86_IO_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif
	paging_init();
#ifdef CONFIG_X86_IO_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	init_apic_mappings();
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
			reserve_bootmem(INITRD_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		}
		else {
			printk("initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				INITRD_START + INITRD_SIZE,
				max_low_pfn << PAGE_SHIFT);
			initrd_start = 0;
		}
	}
#endif
	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
	probe_roms();
	for (i = 0; i < e820.nr_map; i++) {
		struct resource *res;
		if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
			continue;
		res = alloc_bootmem_low(sizeof(struct resource));
		switch (e820.map[i].type) {
		case E820_RAM:	res->name = "System RAM"; break;
		case E820_ACPI:	res->name = "ACPI Tables"; break;
		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
		default:	res->name = "reserved";
		}
		res->start = e820.map[i].addr;
		res->end = res->start + e820.map[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
		if (e820.map[i].type == E820_RAM) {
			/*
			 * We don't know which RAM region contains kernel data,
			 * so we try it repeatedly and let the resource manager
			 * test it.
			 */
			request_resource(res, &code_resource);
			request_resource(res, &data_resource);
		}
	}
	request_resource(&iomem_resource, &vram_resource);

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, standard_io_resources+i);

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
#ifndef CONFIG_X86_TSC
static int tsc_disable __initdata = 0;

static int __init tsc_setup(char *str)
{
	tsc_disable = 1;
	return 1;
}

__setup("notsc", tsc_setup);
#endif
static int __init get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while ( *p == ' ' )
		p++;
	if ( p != q ) {
		while ( *p )
			*q++ = *p++;
		while ( q <= &c->x86_model_id[48] )
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}
static void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk("CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size=(ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;
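	/* Leaf 0x80000006: ECX bits 31-16 hold the unified L2 size in KB,
	   bits 7-0 the line size reported in the printk below. */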
	/* AMD errata T13 (order #21922) */
	if (c->x86_vendor == X86_VENDOR_AMD &&
	    c->x86 == 6 &&
	    c->x86_model == 3 &&
	    c->x86_mask == 0) {
		l2size = 64;
	}

	if ( l2size == 0 )
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk("CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}
/*
 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 *	misexecution of code under Linux. Owners of such processors should
 *	contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.mygale.com/~poulot/k6bug.html
 *		http://www.amd.com/K6/k6docs/revgd.html
 *
 *	The following test is erm.. interesting. AMD neglected to up
 *	the chip setting when fixing the bug but they also tweaked some
 *	performance at the same time..
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");

static int __init init_amd(struct cpuinfo_x86 *c)
{
	u32 l, h;
	unsigned long flags;
	int mbytes = max_mapnr >> (20-PAGE_SHIFT);
	int r;

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	r = get_model_name(c);

	switch(c->x86)
	{
		case 5:
			if( c->x86_model < 6 )
			{
				/* Based on AMD doc 20734R - June 2000 */
				if ( c->x86_model == 0 ) {
					clear_bit(X86_FEATURE_APIC, &c->x86_capability);
					set_bit(X86_FEATURE_PGE, &c->x86_capability);
				}
				break;
			}

			if ( c->x86_model == 6 && c->x86_mask == 1 ) {
				const int K6_BUG_LOOP = 1000000;
				int n;
				void (*f_vide)(void);
				unsigned long d, d2;

				printk(KERN_INFO "AMD K6 stepping B detected - ");

				/*
				 * It looks like AMD fixed the 2.6.2 bug and improved indirect
				 * calls at the same time.
				 */

				n = K6_BUG_LOOP;
				f_vide = vide;
				rdtscl(d);
				while (n--)
					f_vide();
				rdtscl(d2);
				d = d2-d;

				/* Knock these two lines out if it debugs out ok */
				printk(KERN_INFO "K6 BUG %ld %d (Report these if test report is incorrect)\n", d, 20*K6_BUG_LOOP);
				printk(KERN_INFO "AMD K6 stepping B detected - ");
				/* -- cut here -- */
				if (d > 20*K6_BUG_LOOP)
					printk("system stability may be impaired when more than 32 MB are used.\n");
				else
					printk("probably OK (after B9730xxxx).\n");
				printk(KERN_INFO "Please see http://www.mygale.com/~poulot/k6bug.html\n");
			}
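			/*
			 * MSR 0xC0000082 is the K6 Write Handling Control
			 * Register (WHCR).  Both code paths below program the
			 * write-allocate limit in 4MB units: the old layout
			 * as (1<<0)|((mbytes/4)<<1) (enable bit plus limit),
			 * the newer layout as ((mbytes>>2)<<22)|(1<<16).
			 */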
			/* K6 with old style WHCR */
			if( c->x86_model < 8 ||
			   (c->x86_model== 8 && c->x86_mask < 8))
			{
				/* We can only write allocate on the low 508Mb */
				if(mbytes>508)
					mbytes=508;

				rdmsr(0xC0000082, l, h);
				if((l&0x0000FFFF)==0)
				{
					l=(1<<0)|((mbytes/4)<<1);
					save_flags(flags);
					__cli();
					__asm__ __volatile__ ("wbinvd": : :"memory");
					wrmsr(0xC0000082, l, h);
					restore_flags(flags);
					printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
						mbytes);
				}
				break;
			}
			if (c->x86_model == 8 || c->x86_model == 9 || c->x86_model == 13)
			{
				/* The more serious chips .. */

				if(mbytes>4092)
					mbytes=4092;

				rdmsr(0xC0000082, l, h);
				if((l&0xFFFF0000)==0)
				{
					l=((mbytes>>2)<<22)|(1<<16);
					save_flags(flags);
					__cli();
					__asm__ __volatile__ ("wbinvd": : :"memory");
					wrmsr(0xC0000082, l, h);
					restore_flags(flags);
					printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
						mbytes);
				}

				/* Set MTRR capability flag if appropriate */
				if ( (c->x86_model == 13) ||
				     (c->x86_model == 9) ||
				    ((c->x86_model == 8) &&
				     (c->x86_mask >= 8)) )
					set_bit(X86_FEATURE_K6_MTRR, &c->x86_capability);
				break;
			}
			break;

		case 6:	/* An Athlon/Duron. We can trust the BIOS probably */
			break;
	}

	display_cacheinfo(c);
	return r;
}
/*
 * Read Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
 */
static inline void do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
	unsigned char ccr2, ccr3;

	/* we test for DEVID by checking whether CCR3 is writable */
	cli();
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, ccr3 ^ 0x80);
	getCx86(0xc0);   /* dummy to change bus */

	if (getCx86(CX86_CCR3) == ccr3) {	/* no DEVID regs. */
		ccr2 = getCx86(CX86_CCR2);
		setCx86(CX86_CCR2, ccr2 ^ 0x04);
		getCx86(0xc0);  /* dummy */

		if (getCx86(CX86_CCR2) == ccr2)	/* old Cx486SLC/DLC */
			*dir0 = 0xfd;
		else {				/* Cx486S A step */
			setCx86(CX86_CCR2, ccr2);
			*dir0 = 0xfe;
		}
	}
	else {
		setCx86(CX86_CCR3, ccr3);  /* restore CCR3 */

		/* read DIR0 and DIR1 CPU registers */
		*dir0 = getCx86(CX86_DIR0);
		*dir1 = getCx86(CX86_DIR1);
	}
	sti();
}
/*
 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
 * order to identify the Cyrix CPU model after we're out of setup.c
 */
unsigned char Cx86_dir0_msb __initdata = 0;

static char Cx86_model[][9] __initdata = {
	"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
	"M II ", "Unknown"
};
static char Cx486_name[][5] __initdata = {
	"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
	"SRx2", "DRx2"
};
static char Cx486S_name[][4] __initdata = {
	"S", "S2", "Se", "S2e"
};
static char Cx486D_name[][4] __initdata = {
	"DX", "DX2", "?", "?", "?", "DX4"
};
static char Cx86_cb[] __initdata = "?.5x Core/Bus Clock";
static char cyrix_model_mult1[] __initdata = "12??43";
static char cyrix_model_mult2[] __initdata = "12233445";
/*
 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
 * BIOSes for compatibility with DOS games.  This makes the udelay loop
 * work correctly, and improves performance.
 */

extern void calibrate_delay(void) __init;

static void __init check_cx686_slop(struct cpuinfo_x86 *c)
{
	if (Cx86_dir0_msb == 3) {
		unsigned char ccr3, ccr5;

		cli();
		ccr3 = getCx86(CX86_CCR3);
		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
		ccr5 = getCx86(CX86_CCR5);
		if (ccr5 & 2)
			setCx86(CX86_CCR5, ccr5 & 0xfd);  /* reset SLOP */
		setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
		sti();

		if (ccr5 & 2) { /* possible wrong calibration done */
			printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
			calibrate_delay();
			c->loops_per_sec = loops_per_sec;
		}
	}
}
static void __init init_cyrix(struct cpuinfo_x86 *c)
{
	unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
	char *buf = c->x86_model_id;
	const char *p = NULL;

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	/* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
	if ( test_bit(1*32+24, &c->x86_capability) ) {
		clear_bit(1*32+24, &c->x86_capability);
		set_bit(X86_FEATURE_CXMMX, &c->x86_capability);
	}

	do_cyrix_devid(&dir0, &dir1);

	check_cx686_slop(c);

	Cx86_dir0_msb = dir0_msn = dir0 >> 4;	/* identifies CPU "family"   */
	dir0_lsn = dir0 & 0xf;			/* model or clock multiplier */

	/* common case step number/rev -- exceptions handled below */
	c->x86_model = (dir1 >> 4) + 1;
	c->x86_mask = dir1 & 0xf;

	/* Now cook; the original recipe is by Channing Corn, from Cyrix.
	 * We do the same thing for each generation: we work out
	 * the model, multiplier and stepping.  Black magic included,
	 * to make the silicon step/rev numbers match the printed ones.
	 */

	switch (dir0_msn) {
		unsigned char tmp;

	case 0: /* Cx486SLC/DLC/SRx/DRx */
		p = Cx486_name[dir0_lsn & 7];
		break;

	case 1: /* Cx486S/DX/DX2/DX4 */
		p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
			: Cx486S_name[dir0_lsn & 3];
		break;

	case 2: /* 5x86 */
		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
		p = Cx86_cb+2;
		break;

	case 3: /* 6x86/6x86L */
		Cx86_cb[1] = ' ';
		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
		if (dir1 > 0x21) { /* 686L */
			Cx86_cb[0] = 'L';
			p = Cx86_cb;
			(c->x86_model)++;
		} else             /* 686 */
			p = Cx86_cb+1;
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_bit(X86_FEATURE_CYRIX_ARR, &c->x86_capability);
		/* 6x86's contain this bug */
		c->coma_bug = 1;
		break;

	case 4: /* MediaGX/GXm */
		/*
		 * Life sometimes gets weiiiiiiiird if we use this
		 * on the MediaGX. So we turn it off for now.
		 */

#ifdef CONFIG_PCI
		/* It isn't really a PCI quirk directly, but the cure is the
		   same. The MediaGX has deep magic SMM stuff that handles the
		   SB emulation. It throws away the fifo on disable_dma() which
		   is wrong and ruins the audio.

		   Bug2: VSA1 has a wrap bug so that using maximum sized DMA
		   causes bad things. According to NatSemi VSA2 has another
		   bug to do with 'hlt'. I've not seen any boards using VSA2
		   and X doesn't seem to support it either so who cares 8).
		   VSA1 we work around however.
		*/

		printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
		isa_dma_bridge_buggy = 2;
#endif
		c->x86_cache_size=16;	/* Yep 16K integrated cache thats it */

		/* GXm supports extended cpuid levels 'ala' AMD */
		if (c->cpuid_level == 2) {
			get_model_name(c);  /* get CPU marketing name */
			clear_bit(X86_FEATURE_TSC, c->x86_capability);
			return;
		}
		else {  /* MediaGX */
			Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
			p = Cx86_cb+2;
			c->x86_model = (dir1 & 0x20) ? 1 : 2;
			clear_bit(X86_FEATURE_TSC, &c->x86_capability);
		}
		break;

	case 5: /* 6x86MX/M II */
		if (dir1 > 7) dir0_msn++;  /* M II */
		else c->coma_bug = 1;      /* 6x86MX, it has the bug. */
		tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
		Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
		p = Cx86_cb+tmp;
		if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
			(c->x86_model)++;
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_bit(X86_FEATURE_CYRIX_ARR, &c->x86_capability);
		break;

	case 0xf:  /* Cyrix 486 without DEVID registers */
		switch (dir0_lsn) {
		case 0xd:  /* either a 486SLC or DLC w/o DEVID */
			dir0_msn = 0;
			p = Cx486_name[(c->hard_math) ? 1 : 0];
			break;

		case 0xe:  /* a 486S A step */
			dir0_msn = 0;
			p = Cx486S_name[0];
			break;
		}
		break;

	default:  /* unknown (shouldn't happen, we know everyone ;-) */
		dir0_msn = 7;
		break;
	}
	strcpy(buf, Cx86_model[dir0_msn & 7]);
	if (p) strcat(buf, p);
	return;
}
static void __init init_centaur(struct cpuinfo_x86 *c)
{
	enum {
		ECX8=1<<1,
		EIERRINT=1<<2,
		DPM=1<<3,
		DMCE=1<<4,
		DSTPCLK=1<<5,
		ELINEAR=1<<6,
		DSMC=1<<7,
		DTLOCK=1<<8,
		EDCTLB=1<<8,
		EMMX=1<<9,
		DPDC=1<<11,
		EBRPRED=1<<12,
		DIC=1<<13,
		DDC=1<<14,
		DNA=1<<15,
		ERETSTK=1<<16,
		E2MMX=1<<19,
		EAMD3D=1<<20,
	};

	char *name;
	u32  fcr_set=0;
	u32  fcr_clr=0;
	u32  lo,hi,newlo;
	u32  aa,bb,cc,dd;

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	switch (c->x86) {

	case 5:
		switch(c->x86_model) {
		case 4:
			name="C6";
			fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
			fcr_clr=DPDC;
			printk("Disabling bugged TSC.\n");
			clear_bit(X86_FEATURE_TSC, &c->x86_capability);
			break;
		case 8:
			switch(c->x86_mask) {
			default:
				name="2";
				break;
			case 7 ... 9:
				name="2A";
				break;
			case 10 ... 15:
				name="2B";
				break;
			}
			fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
			fcr_clr=DPDC;
			break;
		case 9:
			name="3";
			fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
			fcr_clr=DPDC;
			break;
		case 10:
			name="4";
			/* no info on the WC4 yet */
			break;
		default:
			name="??";
		}

		/* get FCR */
		rdmsr(0x107, lo, hi);

		newlo=(lo|fcr_set) & (~fcr_clr);

		if (newlo!=lo) {
			printk("Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
			wrmsr(0x107, newlo, hi );
		} else {
			printk("Centaur FCR is 0x%X\n",lo);
		}
		/* Emulate MTRRs using Centaur's MCR. */
		set_bit(X86_FEATURE_CENTAUR_MCR, &c->x86_capability);
		/* Report CX8 */
		set_bit(X86_FEATURE_CX8, &c->x86_capability);
		/* Set 3DNow! on Winchip 2 and above. */
		if (c->x86_model >=8)
			set_bit(X86_FEATURE_3DNOW, &c->x86_capability);
		/* See if we can find out some more. */
		if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
			/* Yes, we can. */
			cpuid(0x80000005,&aa,&bb,&cc,&dd);
			/* Add L1 data and code cache sizes. */
			c->x86_cache_size = (cc>>24)+(dd>>24);
		}
		sprintf( c->x86_model_id, "WinChip %s", name );
		break;

	case 6:
		switch (c->x86_model) {
		case 6:	/* Cyrix III */
			rdmsr (0x1107, lo, hi);
			lo |= (1<<1 | 1<<7);	/* Report CX8 & enable PGE */
			wrmsr (0x1107, lo, hi);

			set_bit(X86_FEATURE_CX8, &c->x86_capability);
			rdmsr (0x80000001, lo, hi);
			if (hi & (1<<31))
				set_bit(X86_FEATURE_3DNOW, &c->x86_capability);

			get_model_name(c);
			display_cacheinfo(c);
			break;
		}
		break;
	}
}
static void __init init_transmeta(struct cpuinfo_x86 *c)
{
	unsigned int cap_mask, uk, max, dummy;
	unsigned int cms_rev1, cms_rev2;
	unsigned int cpu_rev, cpu_freq, cpu_flags;
	char cpu_info[65];

	get_model_name(c);	/* Same as AMD/Cyrix */
	display_cacheinfo(c);

	/* Print CMS and CPU revision */
	max = cpuid_eax(0x80860000);
	if ( max >= 0x80860001 ) {
		cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
		printk("CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
		       (cpu_rev >> 24) & 0xff,
		       (cpu_rev >> 16) & 0xff,
		       (cpu_rev >> 8) & 0xff,
		       cpu_rev & 0xff,
		       cpu_freq);
	}
	if ( max >= 0x80860002 ) {
		cpuid(0x80860002, &dummy, &cms_rev1, &cms_rev2, &dummy);
		printk("CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
		       (cms_rev1 >> 24) & 0xff,
		       (cms_rev1 >> 16) & 0xff,
		       (cms_rev1 >> 8) & 0xff,
		       cms_rev1 & 0xff,
		       cms_rev2);
	}
	if ( max >= 0x80860006 ) {
		cpuid(0x80860003,
		      (void *)&cpu_info[0],
		      (void *)&cpu_info[4],
		      (void *)&cpu_info[8],
		      (void *)&cpu_info[12]);
		cpuid(0x80860004,
		      (void *)&cpu_info[16],
		      (void *)&cpu_info[20],
		      (void *)&cpu_info[24],
		      (void *)&cpu_info[28]);
		cpuid(0x80860005,
		      (void *)&cpu_info[32],
		      (void *)&cpu_info[36],
		      (void *)&cpu_info[40],
		      (void *)&cpu_info[44]);
		cpuid(0x80860006,
		      (void *)&cpu_info[48],
		      (void *)&cpu_info[52],
		      (void *)&cpu_info[56],
		      (void *)&cpu_info[60]);
		cpu_info[64] = '\0';
		printk("CPU: %s\n", cpu_info);
	}

	/* Unhide possibly hidden capability flags */
	rdmsr(0x80860004, cap_mask, uk);
	wrmsr(0x80860004, ~0, uk);
	c->x86_capability[0] = cpuid_edx(0x00000001);
	wrmsr(0x80860004, cap_mask, uk);
}
extern void trap_init_f00f_bug(void);

static void __init init_intel(struct cpuinfo_x86 *c)
{
#ifndef CONFIG_M686
	static int f00f_workaround_enabled = 0;
#endif
	extern void mcheck_init(struct cpuinfo_x86 *c);
	char *p = NULL;
	unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */

#ifndef CONFIG_M686
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the system.
	 * Note that the workaround only should be initialized once...
	 */
	c->f00f_bug = 0;
	if ( c->x86 == 5 ) {
		c->f00f_bug = 1;
		if ( !f00f_workaround_enabled ) {
			trap_init_f00f_bug();
			printk(KERN_INFO "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	if (c->cpuid_level > 1) {
		/* supports eax=2  call */
		int i, j, n;
		int regs[4];
		unsigned char *dp = (unsigned char *)regs;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for ( i = 0 ; i < n ; i++ ) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for ( j = 0 ; j < 3 ; j++ ) {
				if ( regs[j] < 0 ) regs[j] = 0;
			}

			/* Byte 0 is level count, not a descriptor */
			for ( j = 1 ; j < 16 ; j++ ) {
				unsigned char des = dp[j];
				unsigned char dl, dh;
				unsigned int cs;

				dh = des >> 4;
				dl = des & 0x0F;

				/* Black magic... */

				switch ( dh )
				{
				case 0:
					switch ( dl ) {
					case 6:
						/* L1 I cache */
						l1i += 8;
						break;
					case 8:
						/* L1 I cache */
						l1i += 16;
						break;
					case 10:
						/* L1 D cache */
						l1d += 8;
						break;
					case 12:
						/* L1 D cache */
						l1d += 16;
						break;
					default:
						/* TLB, or unknown */
						break;
					}
					break;
				case 2:
					if ( dl ) {
						/* L3 cache */
						cs = (dl-1) << 9;
						l3 += cs;
					}
					break;
				case 4:
					if ( c->x86 > 6 && dl ) {
						/* P4 family */
						if ( dl ) {
							/* L3 cache */
							cs = 128 << (dl-1);
							l3 += cs;
							break;
						}
					}
					/* else same as 8 - fall through */
				case 8:
					if ( dl ) {
						/* L2 cache */
						cs = 128 << (dl-1);
						l2 += cs;
					}
					break;
				case 6:
					if (dl > 5) {
						/* L1 D cache */
						cs = 8<<(dl-6);
						l1d += cs;
					}
					break;
				case 7:
					if ( dl >= 8 )
					{
						/* L2 cache */
						cs = 64<<(dl-8);
						l2 += cs;
					} else {
						/* L0 I cache, count as L1 */
						cs = dl ? (16 << (dl-1)) : 12;
						l1i += cs;
					}
					break;
				default:
					/* TLB, or something else we don't know about */
					break;
				}
			}
		}
		if ( l1i || l1d )
			printk("CPU: L1 I cache: %dK, L1 D cache: %dK\n",
			       l1i, l1d);
		if ( l2 )
			printk("CPU: L2 cache: %dK\n", l2);
		if ( l3 )
			printk("CPU: L3 cache: %dK\n", l3);

		/*
		 * This assumes the L3 cache is shared; it typically lives in
		 * the northbridge.  The L1 caches are included by the L2
		 * cache, and so should not be included for the purpose of
		 * SMP switching weights.
		 */
		c->x86_cache_size = l2 ? l2 : (l1i+l1d);
	}

	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
	if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
		clear_bit(X86_FEATURE_SEP, &c->x86_capability);

	/* Names for the Pentium II/Celeron processors
	   detectable only by also checking the cache size.
	   Dixon is NOT a Celeron. */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}
	}

	if ( p )
		strcpy(c->x86_model_id, p);

	/* Enable MCA if available */
	mcheck_init(c);
}
void __init get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "CyrixInstead"))
		c->x86_vendor = X86_VENDOR_CYRIX;
	else if (!strcmp(v, "UMC UMC UMC "))
		c->x86_vendor = X86_VENDOR_UMC;
	else if (!strcmp(v, "CentaurHauls"))
		c->x86_vendor = X86_VENDOR_CENTAUR;
	else if (!strcmp(v, "NexGenDriven"))
		c->x86_vendor = X86_VENDOR_NEXGEN;
	else if (!strcmp(v, "RiseRiseRise"))
		c->x86_vendor = X86_VENDOR_RISE;
	else if (!strcmp(v, "GenuineTMx86") ||
		 !strcmp(v, "TransmetaCPU"))
		c->x86_vendor = X86_VENDOR_TRANSMETA;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}
struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};

/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set the name; */
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
static struct cpu_model_info cpu_models[] __initdata = {
	{ X86_VENDOR_INTEL,	4,
	  { "486 DX-25/33", "486 DX-50", "486 SX", "486 DX/2", "486 SL",
	    "486 SX/2", NULL, "486 DX/2-WB", "486 DX/4", "486 DX/4-WB", NULL,
	    NULL, NULL, NULL, NULL, NULL }},
	{ X86_VENDOR_INTEL,	5,
	  { "Pentium 60/66 A-step", "Pentium 60/66", "Pentium 75 - 200",
	    "OverDrive PODP5V83", "Pentium MMX", NULL, NULL,
	    "Mobile Pentium 75 - 200", "Mobile Pentium MMX", NULL, NULL, NULL,
	    NULL, NULL, NULL, NULL }},
	{ X86_VENDOR_INTEL,	6,
	  { "Pentium Pro A-step", "Pentium Pro", NULL, "Pentium II (Klamath)",
	    NULL, "Pentium II (Deschutes)", "Mobile Pentium II",
	    "Pentium III (Katmai)", "Pentium III (Coppermine)", NULL,
	    "Pentium III (Cascades)", NULL, NULL, NULL, NULL }},
	{ X86_VENDOR_AMD,	4,
	  { NULL, NULL, NULL, "486 DX/2", NULL, NULL, NULL, "486 DX/2-WB",
	    "486 DX/4", "486 DX/4-WB", NULL, NULL, NULL, NULL, "Am5x86-WT",
	    "Am5x86-WB" }},
	{ X86_VENDOR_AMD,	5, /* Is this really necessary?? */
	  { "K5/SSA5", "K5",
	    "K5", "K5", NULL, NULL,
	    "K6", "K6", "K6-2",
	    "K6-3", NULL, NULL, NULL, NULL, NULL, NULL }},
	{ X86_VENDOR_AMD,	6, /* Is this really necessary?? */
	  { "Athlon", "Athlon",
	    "Athlon", NULL, "Athlon", NULL,
	    NULL, NULL, NULL,
	    NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
	{ X86_VENDOR_UMC,	4,
	  { NULL, "U5D", "U5S", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	    NULL, NULL, NULL, NULL, NULL, NULL }},
	{ X86_VENDOR_NEXGEN,	5,
	  { "Nx586", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	    NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
	{ X86_VENDOR_RISE,	5,
	  { "mP6", "mP6", NULL, NULL, NULL, NULL, NULL,
	    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
};
/* Look up CPU names by table lookup. */
static char __init *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info = cpu_models;
	int i;

	if ( c->x86_model >= 16 )
		return NULL;	/* Range check */

	for ( i = 0 ; i < sizeof(cpu_models)/sizeof(struct cpu_model_info) ; i++ ) {
		if ( info->vendor == c->x86_vendor &&
		     info->family == c->x86 ) {
			return info->model_names[c->x86_model];
		}
		info++;
	}

	return NULL;		/* Not found */
}
/*
 * Detect a NexGen CPU running without BIOS hypercode new enough
 * to have CPUID. (Thanks to Herbert Oppmann)
 */

static int __init deep_magic_nexgen_probe(void)
{
	int ret;

	__asm__ __volatile__ (
		"	movw	$0x5555, %%ax\n"
		"	xorw	%%dx,%%dx\n"
		"	movw	$2, %%cx\n"
		"	divw	%%cx\n"
		"	movl	$0, %%eax\n"
		"	jnz	1f\n"
		"	movl	$1, %%eax\n"
		"1:\n"
		: "=a" (ret) : : "cx", "dx" );
	return ret;
}
static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if( test_bit(X86_FEATURE_PN, &c->x86_capability) &&
	    disable_x86_serial_nr ) {
		/* Disable processor serial number */
		unsigned long lo,hi;
		rdmsr(0x119,lo,hi);
		lo |= 0x200000;
		wrmsr(0x119,lo,hi);
		printk(KERN_INFO "CPU serial number disabled.\n");
		clear_bit(X86_FEATURE_PN, &c->x86_capability);
	}
}

int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	asm("pushfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"
	    "movl %0,%1\n\t"
	    "xorl %2,%0\n\t"
	    "pushl %0\n\t"
	    "popfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"
	    "popfl\n\t"
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
static int __init have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
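/*
 * The asm in flag_is_changeable_p() saves EFLAGS, XORs the requested
 * bit, reloads EFLAGS and reads it back: if the bit stuck, the flag
 * is changeable.  CPUs with CPUID let bit 21 (X86_EFLAGS_ID) be
 * toggled this way; processors without CPUID keep it fixed.
 */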
/*
 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
 * by the fact that they preserve the flags across the division of 5/2.
 * PII and PPro exhibit this behavior too, but they have cpuid available.
 */

/*
 * Perform the Cyrix 5/2 test. A Cyrix won't change
 * the flags, while other 486 chips will.
 */
static inline int test_cyrix_52div(void)
{
	unsigned int test;

	__asm__ __volatile__(
	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
	     "div %b2\n\t"	/* divide 5 by 2 */
	     "lahf"		/* store flags into %ah */
	     : "=a" (test)
	     : "0" (5), "q" (2)
	     : "cc");

	/* AH is 0x02 on Cyrix after the divide.. */
	return (unsigned char) (test >> 8) == 0x02;
}

/* Try to detect a CPU with disabled CPUID, and if so, enable.  This routine
   may also be used to detect non-CPUID processors and fill in some of
   the information manually. */
static int __init id_and_try_enable_cpuid(struct cpuinfo_x86 *c)
{
	/* First of all, decide if this is a 486 or higher */
	/* It's a 486 if we can modify the AC flag */
	if ( flag_is_changeable_p(X86_EFLAGS_AC) )
		c->x86 = 4;
	else
		c->x86 = 3;

	/* Detect Cyrix with disabled CPUID */
	if ( c->x86 == 4 && test_cyrix_52div() ) {
		strcpy(c->x86_vendor_id, "CyrixInstead");
	}

	/* Detect NexGen with old hypercode */
	if ( deep_magic_nexgen_probe() ) {
		strcpy(c->x86_vendor_id, "NexGenDriven");
	}

	return have_cpuid_p();	/* Check to see if CPUID now enabled? */
}
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
	int junk, i;
	u32 xlvl, tfms;

	c->loops_per_sec = loops_per_sec;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if ( !have_cpuid_p() && !id_and_try_enable_cpuid(c) ) {
		/* CPU doesn't have CPUID */

		/* If there are any capabilities, they're vendor-specific */
		/* enable_cpuid() would have set c->x86 for us. */
	} else {
		/* CPU does have CPUID */

		/* Get vendor name */
		cpuid(0x00000000, &c->cpuid_level,
		      (int *)&c->x86_vendor_id[0],
		      (int *)&c->x86_vendor_id[8],
		      (int *)&c->x86_vendor_id[4]);

		get_cpu_vendor(c);

		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */

		/* Intel-defined flags: level 0x00000001 */
		if ( c->cpuid_level >= 0x00000001 ) {
			cpuid(0x00000001, &tfms, &junk, &junk,
			      &c->x86_capability[0]);
			c->x86 = (tfms >> 8) & 15;
			c->x86_model = (tfms >> 4) & 15;
			c->x86_mask = tfms & 15;
		} else {
			/* Have CPUID level 0 only - unheard of */
			c->x86 = 4;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
			if ( xlvl >= 0x80000001 )
				c->x86_capability[1] = cpuid_edx(0x80000001);
			if ( xlvl >= 0x80000004 )
				get_model_name(c); /* Default name */
		}

		/* Transmeta-defined flags: level 0x80860001 */
		xlvl = cpuid_eax(0x80860000);
		if ( (xlvl & 0xffff0000) == 0x80860000 ) {
			if ( xlvl >= 0x80860001 )
				c->x86_capability[2] = cpuid_edx(0x80860001);
		}
	}

	printk("CPU: Before vendor init, caps: %08x %08x %08x, vendor = %d\n",
	       c->x86_capability[0],
	       c->x86_capability[1],
	       c->x86_capability[2],
	       c->x86_vendor);

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch ( c->x86_vendor ) {
	case X86_VENDOR_UNKNOWN:
	default:
		/* Not much we can do here... */
		break;

	case X86_VENDOR_CYRIX:
		init_cyrix(c);
		break;

	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_CENTAUR:
		init_centaur(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_NEXGEN:
		c->x86_cache_size = 256; /* A few had 1 MB... */
		break;

	case X86_VENDOR_TRANSMETA:
		init_transmeta(c);
		break;
	}

	printk("CPU: After vendor init, caps: %08x %08x %08x %08x\n",
	       c->x86_capability[0],
	       c->x86_capability[1],
	       c->x86_capability[2],
	       c->x86_capability[3]);

	/*
	 * The vendor-specific functions might have changed features.  Now
	 * we do "generic changes."
	 */

	/* TSC disabled? */
#ifndef CONFIG_X86_TSC
	if ( tsc_disable )
		clear_bit(X86_FEATURE_TSC, &c->x86_capability);
#endif

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/* If the model name is still unset, do table lookup. */
	if ( !c->x86_model_id[0] ) {
		char *p;
		p = table_lookup_model(c);
		if ( p )
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86_vendor, c->x86_model);
	}

	/* Now the feature flags better reflect actual CPU features! */

	printk("CPU: After generic, caps: %08x %08x %08x %08x\n",
	       c->x86_capability[0],
	       c->x86_capability[1],
	       c->x86_capability[2],
	       c->x86_capability[3]);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if ( c != &boot_cpu_data ) {
		/* AND the already accumulated flags with these */
		for ( i = 0 ; i < NCAPINTS ; i++ )
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	printk("CPU: Common caps: %08x %08x %08x %08x\n",
	       boot_cpu_data.x86_capability[0],
	       boot_cpu_data.x86_capability[1],
	       boot_cpu_data.x86_capability[2],
	       boot_cpu_data.x86_capability[3]);
}
/*
 * Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
 */
void __init dodgy_tsc(void)
{
	get_cpu_vendor(&boot_cpu_data);

	if ( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX )
		init_cyrix(&boot_cpu_data);
}
/* These need to match <asm/processor.h> */
static char *cpu_vendor_names[] __initdata = {
	"Intel", "Cyrix", "AMD", "UMC", "NexGen", "Centaur", "Rise", "Transmeta" };

void __init print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < sizeof(cpu_vendor_names)/sizeof(char *))
		vendor = cpu_vendor_names[c->x86_vendor];
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	else
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}
/*
 * Get CPU information for use by the procfs.
 */
int get_cpuinfo(char * buffer)
{
	char *p = buffer;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned.  Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static char *x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", NULL, "tm", "ia64", NULL,

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, "mmxext", NULL,
		NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	struct cpuinfo_x86 *c = cpu_data;
	int i, n;

	for (n = 0; n < NR_CPUS; n++, c++) {
		int fpu_exception;
#ifdef CONFIG_SMP
		if (!(cpu_online_map & (1<<n)))
			continue;
#endif
		p += sprintf(p,"processor\t: %d\n"
			"vendor_id\t: %s\n"
			"cpu family\t: %d\n"
			"model\t\t: %d\n"
			"model name\t: %s\n",
			n,
			c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
			c->x86,
			c->x86_model,
			c->x86_model_id[0] ? c->x86_model_id : "unknown");

		if (c->x86_mask || c->cpuid_level >= 0)
			p += sprintf(p, "stepping\t: %d\n", c->x86_mask);
		else
			p += sprintf(p, "stepping\t: unknown\n");

		if ( test_bit(X86_FEATURE_TSC, &c->x86_capability) ) {
			p += sprintf(p, "cpu MHz\t\t: %lu.%03lu\n",
				cpu_khz / 1000, (cpu_khz % 1000));
		}

		/* Cache size */
		if (c->x86_cache_size >= 0)
			p += sprintf(p, "cache size\t: %d KB\n", c->x86_cache_size);

		/* We use exception 16 if we have hardware math and we've
		   either seen it or the CPU claims it is internal */
		fpu_exception = c->hard_math && (ignore_irq13 || cpu_has_fpu);
		p += sprintf(p, "fdiv_bug\t: %s\n"
			        "hlt_bug\t\t: %s\n"
			        "f00f_bug\t: %s\n"
			        "coma_bug\t: %s\n"
			        "fpu\t\t: %s\n"
			        "fpu_exception\t: %s\n"
			        "cpuid level\t: %d\n"
			        "wp\t\t: %s\n"
			        "features\t:",
			     c->fdiv_bug ? "yes" : "no",
			     c->hlt_works_ok ? "no" : "yes",
			     c->f00f_bug ? "yes" : "no",
			     c->coma_bug ? "yes" : "no",
			     c->hard_math ? "yes" : "no",
			     fpu_exception ? "yes" : "no",
			     c->cpuid_level,
			     c->wp_works_ok ? "yes" : "no");

		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
			if ( test_bit(i, &c->x86_capability) &&
			     x86_cap_flags[i] != NULL )
				p += sprintf(p, " %s", x86_cap_flags[i]);
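		/* loops_per_sec is scaled to BogoMIPS for display: one
		   BogoMIPS is 500000 loops/sec, and the +2500 rounds to
		   the nearest hundredth. */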
		p += sprintf(p, "\nbogomips\t: %lu.%02lu\n\n",
			     (c->loops_per_sec+2500)/500000,
			     ((c->loops_per_sec+2500)/5000) % 100);
	}
	return p - buffer;
}
static unsigned long cpu_initialized __initdata = 0;

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __init cpu_init (void)
{
	int nr = smp_processor_id();
	struct tss_struct * t = &init_tss[nr];

	if (test_and_set_bit(nr, &cpu_initialized)) {
		printk("CPU#%d already initialized!\n", nr);
		for (;;) __sti();
	}
	printk("Initializing CPU#%d\n", nr);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
#ifndef CONFIG_X86_TSC
	if (tsc_disable && cpu_has_tsc) {
		printk("Disabling TSC...\n");
		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		set_in_cr4(X86_CR4_TSD);
	}
#endif

	__asm__ __volatile__("lgdt %0": "=m" (gdt_descr));
	__asm__ __volatile__("lidt %0": "=m" (idt_descr));

	/*
	 * Delete NT
	 */
	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");

	/*
	 * set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if(current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current, nr);

	t->esp0 = current->thread.esp0;
	set_tss_desc(nr,t);
	gdt_table[__TSS(nr)].b &= 0xfffffdff;
	load_TR(nr);
	load_LDT(&init_mm);

	/*
	 * Clear all 6 debug registers:
	 */

#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );

	CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);

#undef CD

	/*
	 * Force FPU initialization:
	 */
	current->flags &= ~PF_USEDFPU;
	current->used_math = 0;
	stts();
}
/*
 * Local Variables:
 * mode:c
 * c-file-style:"k&r"
 * c-basic-offset:8
 * End:
 */