- pre6:
[davej-history.git] / arch / i386 / kernel / setup.c
blob00e5c7b44a85dbf75a125fdfa2bf65e1d84b4870
1 /*
2 * linux/arch/i386/kernel/setup.c
4 * Copyright (C) 1995 Linus Torvalds
6 * Enhanced CPU type detection by Mike Jagdis, Patrick St. Jean
7 * and Martin Mares, November 1997.
9 * Force Cyrix 6x86(MX) and M II processors to report MTRR capability
10 * and Cyrix "coma bug" recognition by
11 * Zoltán Böszörményi <zboszor@mail.externet.hu> February 1999.
13 * Force Centaur C6 processors to report MTRR capability.
14 * Bart Hartgers <bart@etpmod.phys.tue.nl>, May 1999.
16 * Intel Mobile Pentium II detection fix. Sean Gilley, June 1999.
18 * IDT Winchip tweaks, misc clean ups.
19 * Dave Jones <davej@suse.de>, August 1999
21 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
23 * Better detection of Centaur/IDT WinChip models.
24 * Bart Hartgers <bart@etpmod.phys.tue.nl>, August 1999.
26 * Memory region support
27 * David Parsons <orc@pell.chi.il.us>, July-August 1999
29 * Cleaned up cache-detection code
30 * Dave Jones <davej@suse.de>, October 1999
32 * Added proper L2 cache detection for Coppermine
33 * Dragan Stancevic <visitor@valinux.com>, October 1999
35 * Added the original array for capability flags but forgot to credit
36 * myself :) (~1998) Fixed/cleaned up some cpu_model_info and other stuff
37 * Jauder Ho <jauderho@carumba.com>, January 2000
39 * Detection for Celeron coppermine, identify_cpu() overhauled,
40 * and a few other clean ups.
41 * Dave Jones <davej@suse.de>, April 2000
43 * Pentium III FXSR, SSE support
44 * General FPU state handling cleanups
45 * Gareth Hughes <gareth@valinux.com>, May 2000
47 * Added proper Cascades CPU and L2 cache detection for Cascades
48 * and 8-way type cache happy bunch from Intel:^)
49 * Dragan Stancevic <visitor@valinux.com>, May 2000
51 * Forward port AMD Duron errata T13 from 2.2.17pre
52 * Dave Jones <davej@suse.de>, August 2000
57 * This file handles the architecture-dependent parts of initialization
60 #include <linux/errno.h>
61 #include <linux/sched.h>
62 #include <linux/kernel.h>
63 #include <linux/mm.h>
64 #include <linux/stddef.h>
65 #include <linux/unistd.h>
66 #include <linux/ptrace.h>
67 #include <linux/malloc.h>
68 #include <linux/user.h>
69 #include <linux/a.out.h>
70 #include <linux/tty.h>
71 #include <linux/ioport.h>
72 #include <linux/delay.h>
73 #include <linux/config.h>
74 #include <linux/init.h>
75 #include <linux/apm_bios.h>
76 #ifdef CONFIG_BLK_DEV_RAM
77 #include <linux/blk.h>
78 #endif
79 #include <linux/highmem.h>
80 #include <linux/bootmem.h>
81 #include <asm/processor.h>
82 #include <linux/console.h>
83 #include <asm/uaccess.h>
84 #include <asm/system.h>
85 #include <asm/io.h>
86 #include <asm/smp.h>
87 #include <asm/cobalt.h>
88 #include <asm/msr.h>
89 #include <asm/desc.h>
90 #include <asm/e820.h>
91 #include <asm/dma.h>
92 #include <asm/mpspec.h>
93 #include <asm/mmu_context.h>
95 * Machine setup..
98 char ignore_irq13 = 0; /* set if exception 16 works */
99 struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
101 unsigned long mmu_cr4_features = 0;
104 * Bus types ..
106 int EISA_bus = 0;
107 int MCA_bus = 0;
109 /* for MCA, but anyone else can use it if they want */
110 unsigned int machine_id = 0;
111 unsigned int machine_submodel_id = 0;
112 unsigned int BIOS_revision = 0;
113 unsigned int mca_pentium_flag = 0;
116 * Setup options
118 struct drive_info_struct { char dummy[32]; } drive_info;
119 struct screen_info screen_info;
120 struct apm_bios_info apm_bios_info;
121 struct sys_desc_table_struct {
122 unsigned short length;
123 unsigned char table[0];
126 struct e820map e820 = { 0 };
128 unsigned char aux_device_present;
130 #ifdef CONFIG_BLK_DEV_RAM
131 extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
132 extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
133 extern int rd_image_start; /* starting block # of image */
134 #endif
136 extern int root_mountflags;
137 extern char _text, _etext, _edata, _end;
138 extern unsigned long cpu_hz;
141 * This is set up by the setup-routine at boot-time
143 #define PARAM ((unsigned char *)empty_zero_page)
144 #define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
145 #define EXT_MEM_K (*(unsigned short *) (PARAM+2))
146 #define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
147 #define E820_MAP_NR (*(char*) (PARAM+E820NR))
148 #define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
149 #define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
150 #define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
151 #define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
152 #define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
153 #define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
154 #define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
155 #define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
156 #define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
157 #define KERNEL_START (*(unsigned long *) (PARAM+0x214))
158 #define INITRD_START (*(unsigned long *) (PARAM+0x218))
159 #define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c))
160 #define COMMAND_LINE ((char *) (PARAM+2048))
161 #define COMMAND_LINE_SIZE 256
163 #define RAMDISK_IMAGE_START_MASK 0x07FF
164 #define RAMDISK_PROMPT_FLAG 0x8000
165 #define RAMDISK_LOAD_FLAG 0x4000
#ifdef CONFIG_VISWS
char visws_board_type = -1;
char visws_board_rev = -1;

/* I/O port layout of the SGI Visual Workstation south bridge. */
#define	PIIX_PM_START		0x0F80

#define	SIO_GPIO_START		0x0FC0

#define	SIO_PM_START		0x0FC8

#define	PMBASE			PIIX_PM_START
#define	GPIREG0			(PMBASE+0x30)
#define	GPIREG(x)		(GPIREG0+((x)/8))
#define	PIIX_GPI_BD_ID1		18
#define	PIIX_GPI_BD_REG		GPIREG(PIIX_GPI_BD_ID1)

#define	PIIX_GPI_BD_SHIFT	(PIIX_GPI_BD_ID1 % 8)

/* National 307 SuperIO index/data pair and device-select registers. */
#define	SIO_INDEX		0x2e
#define	SIO_DATA		0x2f

#define	SIO_DEV_SEL		0x7
#define	SIO_DEV_ENB		0x30
#define	SIO_DEV_MSB		0x60
#define	SIO_DEV_LSB		0x61

#define	SIO_GP_DEV		0x7

#define	SIO_GP_BASE		SIO_GPIO_START
#define	SIO_GP_MSB		(SIO_GP_BASE>>8)
#define	SIO_GP_LSB		(SIO_GP_BASE&0xff)

#define	SIO_GP_DATA1		(SIO_GP_BASE+0)

#define	SIO_PM_DEV		0x8

#define	SIO_PM_BASE		SIO_PM_START
#define	SIO_PM_MSB		(SIO_PM_BASE>>8)
#define	SIO_PM_LSB		(SIO_PM_BASE&0xff)
#define	SIO_PM_INDEX		(SIO_PM_BASE+0)
#define	SIO_PM_DATA		(SIO_PM_BASE+1)

#define	SIO_PM_FER2		0x1

#define	SIO_PM_GP_EN		0x80

/*
 * Identify the SGI Visual Workstation board type and revision by
 * reading PIIX4 GPI pins and the SuperIO GPIO port.  Results go
 * into visws_board_type / visws_board_rev.
 */
static void
visws_get_board_type_and_rev(void)
{
	int raw;

	/*
	 * NOTE(review): masking the value read from the port with the port
	 * *address* (PIIX_GPI_BD_REG) looks wrong — one would expect a bit
	 * mask here.  This matches the historical upstream code, though, so
	 * it is kept byte-for-byte; confirm against PIIX4 GPI documentation
	 * before changing.
	 */
	visws_board_type = (char)(inb_p(PIIX_GPI_BD_REG) & PIIX_GPI_BD_REG)
							 >> PIIX_GPI_BD_SHIFT;

	/*
	 * Get Board rev.
	 * First, we have to initialize the 307 part to allow us access
	 * to the GPIO registers.  Let's map them at 0x0fc0 which is right
	 * after the PIIX4 PM section.
	 */
	outb_p(SIO_DEV_SEL, SIO_INDEX);
	outb_p(SIO_GP_DEV, SIO_DATA);	/* Talk to GPIO regs. */

	outb_p(SIO_DEV_MSB, SIO_INDEX);
	outb_p(SIO_GP_MSB, SIO_DATA);	/* MSB of GPIO base address */

	outb_p(SIO_DEV_LSB, SIO_INDEX);
	outb_p(SIO_GP_LSB, SIO_DATA);	/* LSB of GPIO base address */

	outb_p(SIO_DEV_ENB, SIO_INDEX);
	outb_p(1, SIO_DATA);		/* Enable GPIO registers. */

	/*
	 * Now, we have to map the power management section to write
	 * a bit which enables access to the GPIO registers.
	 * What lunatic came up with this shit?
	 */
	outb_p(SIO_DEV_SEL, SIO_INDEX);
	outb_p(SIO_PM_DEV, SIO_DATA);	/* Talk to GPIO regs. */

	outb_p(SIO_DEV_MSB, SIO_INDEX);
	outb_p(SIO_PM_MSB, SIO_DATA);	/* MSB of PM base address */

	outb_p(SIO_DEV_LSB, SIO_INDEX);
	outb_p(SIO_PM_LSB, SIO_DATA);	/* LSB of PM base address */

	outb_p(SIO_DEV_ENB, SIO_INDEX);
	outb_p(1, SIO_DATA);		/* Enable PM registers. */

	/*
	 * Now, write the PM register which enables the GPIO registers.
	 */
	outb_p(SIO_PM_FER2, SIO_PM_INDEX);
	outb_p(SIO_PM_GP_EN, SIO_PM_DATA);

	/*
	 * Now, initialize the GPIO registers.
	 * We want them all to be inputs which is the
	 * power on default, so let's leave them alone.
	 * So, let's just read the board rev!
	 */
	raw = inb_p(SIO_GP_DATA1);
	raw &= 0x7f;	/* 7 bits of valid board revision ID. */

	if (visws_board_type == VISWS_320) {
		if (raw < 0x6) {
			visws_board_rev = 4;
		} else if (raw < 0xc) {
			visws_board_rev = 5;
		} else {
			visws_board_rev = 6;
		}
	} else if (visws_board_type == VISWS_540) {
		visws_board_rev = 2;
	} else {
		visws_board_rev = raw;
	}

	printk("Silicon Graphics %s (rev %d)\n",
		visws_board_type == VISWS_320 ? "320" :
		(visws_board_type == VISWS_540 ? "540" :
				"unknown"),
		visws_board_rev);
}
#endif
294 static char command_line[COMMAND_LINE_SIZE] = { 0, };
295 char saved_command_line[COMMAND_LINE_SIZE];
297 struct resource standard_io_resources[] = {
298 { "dma1", 0x00, 0x1f, IORESOURCE_BUSY },
299 { "pic1", 0x20, 0x3f, IORESOURCE_BUSY },
300 { "timer", 0x40, 0x5f, IORESOURCE_BUSY },
301 { "keyboard", 0x60, 0x6f, IORESOURCE_BUSY },
302 { "dma page reg", 0x80, 0x8f, IORESOURCE_BUSY },
303 { "pic2", 0xa0, 0xbf, IORESOURCE_BUSY },
304 { "dma2", 0xc0, 0xdf, IORESOURCE_BUSY },
305 { "fpu", 0xf0, 0xff, IORESOURCE_BUSY }
308 #define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource))
310 static struct resource code_resource = { "Kernel code", 0x100000, 0 };
311 static struct resource data_resource = { "Kernel data", 0, 0 };
312 static struct resource vram_resource = { "Video RAM area", 0xa0000, 0xbffff, IORESOURCE_BUSY };
314 /* System ROM resources */
315 #define MAXROMS 6
316 static struct resource rom_resources[MAXROMS] = {
317 { "System ROM", 0xF0000, 0xFFFFF, IORESOURCE_BUSY },
318 { "Video ROM", 0xc0000, 0xc7fff, IORESOURCE_BUSY }
321 #define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
323 static void __init probe_roms(void)
325 int roms = 1;
326 unsigned long base;
327 unsigned char *romstart;
329 request_resource(&iomem_resource, rom_resources+0);
331 /* Video ROM is standard at C000:0000 - C7FF:0000, check signature */
332 for (base = 0xC0000; base < 0xE0000; base += 2048) {
333 romstart = bus_to_virt(base);
334 if (!romsignature(romstart))
335 continue;
336 request_resource(&iomem_resource, rom_resources + roms);
337 roms++;
338 break;
341 /* Extension roms at C800:0000 - DFFF:0000 */
342 for (base = 0xC8000; base < 0xE0000; base += 2048) {
343 unsigned long length;
345 romstart = bus_to_virt(base);
346 if (!romsignature(romstart))
347 continue;
348 length = romstart[2] * 512;
349 if (length) {
350 unsigned int i;
351 unsigned char chksum;
353 chksum = 0;
354 for (i = 0; i < length; i++)
355 chksum += romstart[i];
357 /* Good checksum? */
358 if (!chksum) {
359 rom_resources[roms].start = base;
360 rom_resources[roms].end = base + length - 1;
361 rom_resources[roms].name = "Extension ROM";
362 rom_resources[roms].flags = IORESOURCE_BUSY;
364 request_resource(&iomem_resource, rom_resources + roms);
365 roms++;
366 if (roms >= MAXROMS)
367 return;
372 /* Final check for motherboard extension rom at E000:0000 */
373 base = 0xE0000;
374 romstart = bus_to_virt(base);
376 if (romsignature(romstart)) {
377 rom_resources[roms].start = base;
378 rom_resources[roms].end = base + 65535;
379 rom_resources[roms].name = "Extension ROM";
380 rom_resources[roms].flags = IORESOURCE_BUSY;
382 request_resource(&iomem_resource, rom_resources + roms);
386 void __init add_memory_region(unsigned long long start,
387 unsigned long long size, int type)
389 int x = e820.nr_map;
391 if (x == E820MAX) {
392 printk("Ooops! Too many entries in the memory map!\n");
393 return;
396 e820.map[x].addr = start;
397 e820.map[x].size = size;
398 e820.map[x].type = type;
399 e820.nr_map++;
400 } /* add_memory_region */
402 #define E820_DEBUG 1
404 static void __init print_memory_map(char *who)
406 int i;
408 for (i = 0; i < e820.nr_map; i++) {
409 printk(" %s: %016Lx @ %016Lx ", who,
410 e820.map[i].size, e820.map[i].addr);
411 switch (e820.map[i].type) {
412 case E820_RAM: printk("(usable)\n");
413 break;
414 case E820_RESERVED:
415 printk("(reserved)\n");
416 break;
417 case E820_ACPI:
418 printk("(ACPI data)\n");
419 break;
420 case E820_NVS:
421 printk("(ACPI NVS)\n");
422 break;
423 default: printk("type %lu\n", e820.map[i].type);
424 break;
430 * Copy the BIOS e820 map into a safe place.
432 * Sanity-check it while we're at it..
434 * If we're lucky and live on a modern system, the setup code
435 * will have given us a memory map that we can use to properly
436 * set up memory. If we aren't, we'll fake a memory map.
438 * We check to see that the memory map contains at least 2 elements
439 * before we'll use it, because the detection code in setup.S may
440 * not be perfect and most every PC known to man has two memory
441 * regions: one from 0 to 640k, and one from 1mb up. (The IBM
442 * thinkpad 560x, for example, does not cooperate with the memory
443 * detection code.)
445 static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
447 /* Only one memory region (or negative)? Ignore it */
448 if (nr_map < 2)
449 return -1;
451 do {
452 unsigned long long start = biosmap->addr;
453 unsigned long long size = biosmap->size;
454 unsigned long long end = start + size;
455 unsigned long type = biosmap->type;
457 /* Overflow in 64 bits? Ignore the memory map. */
458 if (start > end)
459 return -1;
462 * Some BIOSes claim RAM in the 640k - 1M region.
463 * Not right. Fix it up.
465 if (type == E820_RAM) {
466 if (start < 0x100000ULL && end > 0xA0000ULL) {
467 if (start < 0xA0000ULL)
468 add_memory_region(start, 0xA0000ULL-start, type);
469 if (end < 0x100000ULL)
470 continue;
471 start = 0x100000ULL;
472 size = end - start;
475 add_memory_region(start, size, type);
476 } while (biosmap++,--nr_map);
477 return 0;
481 * Do NOT EVER look at the BIOS memory size location.
482 * It does not work on many machines.
484 #define LOWMEMSIZE() (0x9f000)
486 void __init setup_memory_region(void)
488 char *who = "BIOS-e820";
491 * Try to copy the BIOS-supplied E820-map.
493 * Otherwise fake a memory map; one section from 0k->640k,
494 * the next section from 1mb->appropriate_mem_k
496 if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
497 unsigned long mem_size;
499 /* compare results from other methods and take the greater */
500 if (ALT_MEM_K < EXT_MEM_K) {
501 mem_size = EXT_MEM_K;
502 who = "BIOS-88";
503 } else {
504 mem_size = ALT_MEM_K;
505 who = "BIOS-e801";
508 e820.nr_map = 0;
509 add_memory_region(0, LOWMEMSIZE(), E820_RAM);
510 add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
512 printk("BIOS-provided physical RAM map:\n");
513 print_memory_map(who);
514 } /* setup_memory_region */
517 static inline void parse_mem_cmdline (char ** cmdline_p)
519 char c = ' ', *to = command_line, *from = COMMAND_LINE;
520 int len = 0;
521 int usermem = 0;
523 /* Save unparsed command line copy for /proc/cmdline */
524 memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
525 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
527 for (;;) {
529 * "mem=nopentium" disables the 4MB page tables.
530 * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
531 * to <mem>, overriding the bios size.
532 * "mem=XXX[KkmM]@XXX[KkmM]" defines a memory region from
533 * <start> to <start>+<mem>, overriding the bios size.
535 if (c == ' ' && !memcmp(from, "mem=", 4)) {
536 if (to != command_line)
537 to--;
538 if (!memcmp(from+4, "nopentium", 9)) {
539 from += 9+4;
540 boot_cpu_data.x86_capability &= ~X86_FEATURE_PSE;
541 } else if (!memcmp(from+4, "exactmap", 8)) {
542 from += 8+4;
543 e820.nr_map = 0;
544 usermem = 1;
545 } else {
546 /* If the user specifies memory size, we
547 * blow away any automatically generated
548 * size
550 unsigned long start_at, mem_size;
552 if (usermem == 0) {
553 /* first time in: zap the whitelist
554 * and reinitialize it with the
555 * standard low-memory region.
557 e820.nr_map = 0;
558 usermem = 1;
559 add_memory_region(0, LOWMEMSIZE(), E820_RAM);
561 mem_size = memparse(from+4, &from);
562 if (*from == '@')
563 start_at = memparse(from+1, &from);
564 else {
565 start_at = HIGH_MEMORY;
566 mem_size -= HIGH_MEMORY;
567 usermem=0;
569 add_memory_region(start_at, mem_size, E820_RAM);
572 c = *(from++);
573 if (!c)
574 break;
575 if (COMMAND_LINE_SIZE <= ++len)
576 break;
577 *(to++) = c;
579 *to = '\0';
580 *cmdline_p = command_line;
581 if (usermem) {
582 printk("user-defined physical RAM map:\n");
583 print_memory_map("user");
587 void __init setup_arch(char **cmdline_p)
589 unsigned long bootmap_size;
590 unsigned long start_pfn, max_pfn, max_low_pfn;
591 int i;
593 #ifdef CONFIG_VISWS
594 visws_get_board_type_and_rev();
595 #endif
597 ROOT_DEV = to_kdev_t(ORIG_ROOT_DEV);
598 drive_info = DRIVE_INFO;
599 screen_info = SCREEN_INFO;
600 apm_bios_info = APM_BIOS_INFO;
601 if( SYS_DESC_TABLE.length != 0 ) {
602 MCA_bus = SYS_DESC_TABLE.table[3] &0x2;
603 machine_id = SYS_DESC_TABLE.table[0];
604 machine_submodel_id = SYS_DESC_TABLE.table[1];
605 BIOS_revision = SYS_DESC_TABLE.table[2];
607 aux_device_present = AUX_DEVICE_INFO;
609 #ifdef CONFIG_BLK_DEV_RAM
610 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
611 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
612 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
613 #endif
614 setup_memory_region();
616 if (!MOUNT_ROOT_RDONLY)
617 root_mountflags &= ~MS_RDONLY;
618 init_mm.start_code = (unsigned long) &_text;
619 init_mm.end_code = (unsigned long) &_etext;
620 init_mm.end_data = (unsigned long) &_edata;
621 init_mm.brk = (unsigned long) &_end;
623 code_resource.start = virt_to_bus(&_text);
624 code_resource.end = virt_to_bus(&_etext)-1;
625 data_resource.start = virt_to_bus(&_etext);
626 data_resource.end = virt_to_bus(&_edata)-1;
628 parse_mem_cmdline(cmdline_p);
630 #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
631 #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
632 #define PFN_PHYS(x) ((x) << PAGE_SHIFT)
635 * 128MB for vmalloc and initrd
637 #define VMALLOC_RESERVE (unsigned long)(128 << 20)
638 #define MAXMEM (unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE)
639 #define MAXMEM_PFN PFN_DOWN(MAXMEM)
640 #define MAX_NONPAE_PFN (1 << 20)
643 * partially used pages are not usable - thus
644 * we are rounding upwards:
646 start_pfn = PFN_UP(__pa(&_end));
649 * Find the highest page frame number we have available
651 max_pfn = 0;
652 for (i = 0; i < e820.nr_map; i++) {
653 unsigned long start, end;
654 /* RAM? */
655 if (e820.map[i].type != E820_RAM)
656 continue;
657 start = PFN_UP(e820.map[i].addr);
658 end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
659 if (start >= end)
660 continue;
661 if (end > max_pfn)
662 max_pfn = end;
666 * Determine low and high memory ranges:
668 max_low_pfn = max_pfn;
669 if (max_low_pfn > MAXMEM_PFN) {
670 max_low_pfn = MAXMEM_PFN;
671 #ifndef CONFIG_HIGHMEM
672 /* Maximum memory usable is what is directly addressable */
673 printk(KERN_WARNING "Warning only %ldMB will be used.\n",
674 MAXMEM>>20);
675 if (max_pfn > MAX_NONPAE_PFN)
676 printk(KERN_WARNING "Use a PAE enabled kernel.\n");
677 else
678 printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
679 #else /* !CONFIG_HIGHMEM */
680 #ifndef CONFIG_X86_PAE
681 if (max_pfn > MAX_NONPAE_PFN) {
682 max_pfn = MAX_NONPAE_PFN;
683 printk(KERN_WARNING "Warning only 4GB will be used.\n");
684 printk(KERN_WARNING "Use a PAE enabled kernel.\n");
686 #endif /* !CONFIG_X86_PAE */
687 #endif /* !CONFIG_HIGHMEM */
690 #ifdef CONFIG_HIGHMEM
691 highstart_pfn = highend_pfn = max_pfn;
692 if (max_pfn > MAXMEM_PFN) {
693 highstart_pfn = MAXMEM_PFN;
694 printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
695 pages_to_mb(highend_pfn - highstart_pfn));
697 #endif
699 * Initialize the boot-time allocator (with low memory only):
701 bootmap_size = init_bootmem(start_pfn, max_low_pfn);
704 * Register fully available low RAM pages with the bootmem allocator.
706 for (i = 0; i < e820.nr_map; i++) {
707 unsigned long curr_pfn, last_pfn, size;
709 * Reserve usable low memory
711 if (e820.map[i].type != E820_RAM)
712 continue;
714 * We are rounding up the start address of usable memory:
716 curr_pfn = PFN_UP(e820.map[i].addr);
717 if (curr_pfn >= max_low_pfn)
718 continue;
720 * ... and at the end of the usable range downwards:
722 last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
724 if (last_pfn > max_low_pfn)
725 last_pfn = max_low_pfn;
728 * .. finally, did all the rounding and playing
729 * around just make the area go away?
731 if (last_pfn <= curr_pfn)
732 continue;
734 size = last_pfn - curr_pfn;
735 free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
738 * Reserve the bootmem bitmap itself as well. We do this in two
739 * steps (first step was init_bootmem()) because this catches
740 * the (very unlikely) case of us accidentally initializing the
741 * bootmem allocator with an invalid RAM area.
743 reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
744 bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
747 * reserve physical page 0 - it's a special BIOS page on many boxes,
748 * enabling clean reboots, SMP operation, laptop functions.
750 reserve_bootmem(0, PAGE_SIZE);
752 #ifdef CONFIG_SMP
754 * But first pinch a few for the stack/trampoline stuff
755 * FIXME: Don't need the extra page at 4K, but need to fix
756 * trampoline before removing it. (see the GDT stuff)
758 reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
759 smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
760 #endif
762 #ifdef CONFIG_X86_IO_APIC
764 * Find and reserve possible boot-time SMP configuration:
766 find_smp_config();
767 #endif
768 paging_init();
769 #ifdef CONFIG_X86_IO_APIC
771 * get boot-time SMP configuration:
773 if (smp_found_config)
774 get_smp_config();
775 #endif
776 #ifdef CONFIG_X86_LOCAL_APIC
777 init_apic_mappings();
778 #endif
780 #ifdef CONFIG_BLK_DEV_INITRD
781 if (LOADER_TYPE && INITRD_START) {
782 if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
783 reserve_bootmem(INITRD_START, INITRD_SIZE);
784 initrd_start =
785 INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
786 initrd_end = initrd_start+INITRD_SIZE;
788 else {
789 printk("initrd extends beyond end of memory "
790 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
791 INITRD_START + INITRD_SIZE,
792 max_low_pfn << PAGE_SHIFT);
793 initrd_start = 0;
796 #endif
799 * Request address space for all standard RAM and ROM resources
800 * and also for regions reported as reserved by the e820.
802 probe_roms();
803 for (i = 0; i < e820.nr_map; i++) {
804 struct resource *res;
805 if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
806 continue;
807 res = alloc_bootmem_low(sizeof(struct resource));
808 switch (e820.map[i].type) {
809 case E820_RAM: res->name = "System RAM"; break;
810 case E820_ACPI: res->name = "ACPI Tables"; break;
811 case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
812 default: res->name = "reserved";
814 res->start = e820.map[i].addr;
815 res->end = res->start + e820.map[i].size - 1;
816 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
817 request_resource(&iomem_resource, res);
818 if (e820.map[i].type == E820_RAM) {
820 * We dont't know which RAM region contains kernel data,
821 * so we try it repeatedly and let the resource manager
822 * test it.
824 request_resource(res, &code_resource);
825 request_resource(res, &data_resource);
828 request_resource(&iomem_resource, &vram_resource);
830 /* request I/O space for devices used on all i[345]86 PCs */
831 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
832 request_resource(&ioport_resource, standard_io_resources+i);
834 #ifdef CONFIG_VT
835 #if defined(CONFIG_VGA_CONSOLE)
836 conswitchp = &vga_con;
837 #elif defined(CONFIG_DUMMY_CONSOLE)
838 conswitchp = &dummy_con;
839 #endif
840 #endif
843 static int __init get_model_name(struct cpuinfo_x86 *c)
845 unsigned int n, dummy, *v;
848 * Actually we must have cpuid or we could never have
849 * figured out that this was AMD/Cyrix/Transmeta
850 * from the vendor info :-).
853 cpuid(0x80000000, &n, &dummy, &dummy, &dummy);
854 if (n < 0x80000004)
855 return 0;
856 cpuid(0x80000001, &dummy, &dummy, &dummy, &(c->x86_capability));
857 v = (unsigned int *) c->x86_model_id;
858 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
859 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
860 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
861 c->x86_model_id[48] = 0;
862 return 1;
865 static int __init amd_model(struct cpuinfo_x86 *c)
867 u32 l, h;
868 unsigned long flags;
869 unsigned int n, dummy, ecx, edx;
870 int mbytes = max_mapnr >> (20-PAGE_SHIFT);
872 int r=get_model_name(c);
875 * Set MTRR capability flag if appropriate
877 if(boot_cpu_data.x86 == 5) {
878 if((boot_cpu_data.x86_model == 9) ||
879 ((boot_cpu_data.x86_model == 8) &&
880 (boot_cpu_data.x86_mask >= 8)))
881 c->x86_capability |= X86_FEATURE_MTRR;
885 * Now do the cache operations.
887 switch(c->x86)
889 case 5:
890 if( c->x86_model < 6 )
892 /* Anyone with a K5 want to fill this in */
893 break;
896 /* K6 with old style WHCR */
897 if( c->x86_model < 8 ||
898 (c->x86_model== 8 && c->x86_mask < 8))
900 /* We can only write allocate on the low 508Mb */
901 if(mbytes>508)
902 mbytes=508;
904 rdmsr(0xC0000082, l, h);
905 if((l&0x0000FFFF)==0)
907 l=(1<<0)|((mbytes/4)<<1);
908 save_flags(flags);
909 __cli();
910 __asm__ __volatile__ ("wbinvd": : :"memory");
911 wrmsr(0xC0000082, l, h);
912 restore_flags(flags);
913 printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
914 mbytes);
917 break;
919 if (c->x86_model == 8 || c->x86_model == 9)
921 /* The more serious chips .. */
923 if(mbytes>4092)
924 mbytes=4092;
925 rdmsr(0xC0000082, l, h);
926 if((l&0xFFFF0000)==0)
928 l=((mbytes>>2)<<22)|(1<<16);
929 save_flags(flags);
930 __cli();
931 __asm__ __volatile__ ("wbinvd": : :"memory");
932 wrmsr(0xC0000082, l, h);
933 restore_flags(flags);
934 printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
935 mbytes);
937 break;
939 break;
940 case 6: /* An Athlon/Duron. We can trust the BIOS probably */
941 break;
944 cpuid(0x80000000, &n, &dummy, &dummy, &dummy);
945 if (n >= 0x80000005) {
946 cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
947 printk("CPU: L1 I Cache: %dK L1 D Cache: %dK (%d bytes/line)\n",
948 edx>>24, ecx>>24, edx&0xFF);
949 c->x86_cache_size=(ecx>>24)+(edx>>24);
952 /* AMD errata T13 (order #21922) */
953 if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 3 &&
954 boot_cpu_data.x86_mask == 0)
956 c->x86_cache_size = 64;
957 printk("CPU: L2 Cache: 64K\n");
958 } else {
959 if (n >= 0x80000006) {
960 cpuid(0x80000006, &dummy, &dummy, &ecx, &edx);
961 printk("CPU: L2 Cache: %dK\n", ecx>>16);
962 c->x86_cache_size=(ecx>>16);
966 return r;
971 * Read Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
973 static inline void do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
975 unsigned char ccr2, ccr3;
977 /* we test for DEVID by checking whether CCR3 is writable */
978 cli();
979 ccr3 = getCx86(CX86_CCR3);
980 setCx86(CX86_CCR3, ccr3 ^ 0x80);
981 getCx86(0xc0); /* dummy to change bus */
983 if (getCx86(CX86_CCR3) == ccr3) { /* no DEVID regs. */
984 ccr2 = getCx86(CX86_CCR2);
985 setCx86(CX86_CCR2, ccr2 ^ 0x04);
986 getCx86(0xc0); /* dummy */
988 if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
989 *dir0 = 0xfd;
990 else { /* Cx486S A step */
991 setCx86(CX86_CCR2, ccr2);
992 *dir0 = 0xfe;
995 else {
996 setCx86(CX86_CCR3, ccr3); /* restore CCR3 */
998 /* read DIR0 and DIR1 CPU registers */
999 *dir0 = getCx86(CX86_DIR0);
1000 *dir1 = getCx86(CX86_DIR1);
1002 sti();
1006 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
1007 * order to identify the Cyrix CPU model after we're out of setup.c
1009 unsigned char Cx86_dir0_msb __initdata = 0;
1011 static char Cx86_model[][9] __initdata = {
1012 "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
1013 "M II ", "Unknown"
1015 static char Cx486_name[][5] __initdata = {
1016 "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
1017 "SRx2", "DRx2"
1019 static char Cx486S_name[][4] __initdata = {
1020 "S", "S2", "Se", "S2e"
1022 static char Cx486D_name[][4] __initdata = {
1023 "DX", "DX2", "?", "?", "?", "DX4"
1025 static char Cx86_cb[] __initdata = "?.5x Core/Bus Clock";
1026 static char cyrix_model_mult1[] __initdata = "12??43";
1027 static char cyrix_model_mult2[] __initdata = "12233445";
1029 static void __init cyrix_model(struct cpuinfo_x86 *c)
1031 unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
1032 char *buf = c->x86_model_id;
1033 const char *p = NULL;
1035 do_cyrix_devid(&dir0, &dir1);
1037 Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family" */
1038 dir0_lsn = dir0 & 0xf; /* model or clock multiplier */
1040 /* common case step number/rev -- exceptions handled below */
1041 c->x86_model = (dir1 >> 4) + 1;
1042 c->x86_mask = dir1 & 0xf;
1044 /* Now cook; the original recipe is by Channing Corn, from Cyrix.
1045 * We do the same thing for each generation: we work out
1046 * the model, multiplier and stepping. Black magic included,
1047 * to make the silicon step/rev numbers match the printed ones.
1050 switch (dir0_msn) {
1051 unsigned char tmp;
1053 case 0: /* Cx486SLC/DLC/SRx/DRx */
1054 p = Cx486_name[dir0_lsn & 7];
1055 break;
1057 case 1: /* Cx486S/DX/DX2/DX4 */
1058 p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
1059 : Cx486S_name[dir0_lsn & 3];
1060 break;
1062 case 2: /* 5x86 */
1063 Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
1064 p = Cx86_cb+2;
1065 break;
1067 case 3: /* 6x86/6x86L */
1068 Cx86_cb[1] = ' ';
1069 Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
1070 if (dir1 > 0x21) { /* 686L */
1071 Cx86_cb[0] = 'L';
1072 p = Cx86_cb;
1073 (c->x86_model)++;
1074 } else /* 686 */
1075 p = Cx86_cb+1;
1076 /* Emulate MTRRs using Cyrix's ARRs. */
1077 c->x86_capability |= X86_FEATURE_MTRR;
1078 /* 6x86's contain this bug */
1079 c->coma_bug = 1;
1080 break;
1082 case 4: /* MediaGX/GXm */
1084 * Life sometimes gets weiiiiiiiird if we use this
1085 * on the MediaGX. So we turn it off for now.
1088 #ifdef CONFIG_PCI
1089 /* It isnt really a PCI quirk directly, but the cure is the
1090 same. The MediaGX has deep magic SMM stuff that handles the
1091 SB emulation. It thows away the fifo on disable_dma() which
1092 is wrong and ruins the audio.
1094 Bug2: VSA1 has a wrap bug so that using maximum sized DMA
1095 causes bad things. According to NatSemi VSA2 has another
1096 bug to do with 'hlt'. I've not seen any boards using VSA2
1097 and X doesn't seem to support it either so who cares 8).
1098 VSA1 we work around however.
1102 printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
1103 isa_dma_bridge_buggy = 2;
1104 #endif
1105 c->x86_cache_size=16; /* Yep 16K integrated cache thats it */
1107 /* GXm supports extended cpuid levels 'ala' AMD */
1108 if (c->cpuid_level == 2) {
1109 get_model_name(c); /* get CPU marketing name */
1110 c->x86_capability&=~X86_FEATURE_TSC;
1111 return;
1113 else { /* MediaGX */
1114 Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
1115 p = Cx86_cb+2;
1116 c->x86_model = (dir1 & 0x20) ? 1 : 2;
1117 c->x86_capability&=~X86_FEATURE_TSC;
1119 break;
1121 case 5: /* 6x86MX/M II */
1122 if (dir1 > 7) dir0_msn++; /* M II */
1123 else c->coma_bug = 1; /* 6x86MX, it has the bug. */
1124 tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
1125 Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
1126 p = Cx86_cb+tmp;
1127 if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
1128 (c->x86_model)++;
1129 /* Emulate MTRRs using Cyrix's ARRs. */
1130 c->x86_capability |= X86_FEATURE_MTRR;
1131 break;
1133 case 0xf: /* Cyrix 486 without DEVID registers */
1134 switch (dir0_lsn) {
1135 case 0xd: /* either a 486SLC or DLC w/o DEVID */
1136 dir0_msn = 0;
1137 p = Cx486_name[(c->hard_math) ? 1 : 0];
1138 break;
1140 case 0xe: /* a 486S A step */
1141 dir0_msn = 0;
1142 p = Cx486S_name[0];
1143 break;
1145 break;
1147 default: /* unknown (shouldn't happen, we know everyone ;-) */
1148 dir0_msn = 7;
1149 break;
1151 strcpy(buf, Cx86_model[dir0_msn & 7]);
1152 if (p) strcat(buf, p);
1153 return;
/*
 * centaur_model - identify and configure Centaur/IDT WinChip CPUs.
 *
 * Picks a marketing name from the model/stepping, programs the Feature
 * Control Register (MSR 0x107) to enable/disable model-specific features,
 * emulates MTRRs via Centaur MCRs, and reads the L1 cache sizes from the
 * extended CPUID levels when available.
 */
1156 static void __init centaur_model(struct cpuinfo_x86 *c)
/*
 * Bit names for the Feature Control Register (MSR 0x107).
 * Convention: E* = enable, D* = disable.
 * NOTE(review): DTLOCK and EDCTLB are both defined as 1<<8 — this
 * duplicate is present in the original source; verify against the
 * IDT WinChip FCR documentation.
 */
1158 enum {
1159 ECX8=1<<1,
1160 EIERRINT=1<<2,
1161 DPM=1<<3,
1162 DMCE=1<<4,
1163 DSTPCLK=1<<5,
1164 ELINEAR=1<<6,
1165 DSMC=1<<7,
1166 DTLOCK=1<<8,
1167 EDCTLB=1<<8,
1168 EMMX=1<<9,
1169 DPDC=1<<11,
1170 EBRPRED=1<<12,
1171 DIC=1<<13,
1172 DDC=1<<14,
1173 DNA=1<<15,
1174 ERETSTK=1<<16,
1175 E2MMX=1<<19,
1176 EAMD3D=1<<20,
1179 char *name;
1180 u32 fcr_set=0;	/* FCR bits to turn on for this model */
1181 u32 fcr_clr=0;	/* FCR bits to turn off for this model */
1182 u32 lo,hi,newlo;
1183 u32 aa,bb,cc,dd;
1185 switch(c->x86_model) {
1186 case 4:
/* WinChip C6: its TSC is unusable, so drop the capability bit. */
1187 name="C6";
1188 fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
1189 fcr_clr=DPDC;
1190 printk("Disabling bugged TSC.\n");
1191 c->x86_capability &= ~X86_FEATURE_TSC;
1192 break;
1193 case 8:
/* WinChip 2: sub-revision (2/2A/2B) is encoded in the stepping.
   Listing "default:" first in a switch is legal C. */
1194 switch(c->x86_mask) {
1195 default:
1196 name="2";
1197 break;
1198 case 7 ... 9:
1199 name="2A";
1200 break;
1201 case 10 ... 15:
1202 name="2B";
1203 break;
1205 fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
1206 fcr_clr=DPDC;
1207 break;
1208 case 9:
1209 name="3";
1210 fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
1211 fcr_clr=DPDC;
1212 break;
1213 case 10:
1214 name="4";
1215 /* no info on the WC4 yet */
1216 break;
1217 default:
1218 name="??";
1221 /* get FCR */
1222 rdmsr(0x107, lo, hi);
1224 newlo=(lo|fcr_set) & (~fcr_clr);
/* Only write the MSR back if the value actually changed. */
1226 if (newlo!=lo) {
1227 printk("Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
1228 wrmsr(0x107, newlo, hi );
1229 } else {
1230 printk("Centaur FCR is 0x%X\n",lo);
1233 /* Emulate MTRRs using Centaur's MCR. */
1234 c->x86_capability |= X86_FEATURE_MTRR;
1235 /* Report CX8 */
1236 c->x86_capability |= X86_FEATURE_CX8;
1237 /* Set 3DNow! on Winchip 2 and above. */
1238 if (c->x86_model >=8)
1239 c->x86_capability |= X86_FEATURE_AMD3D;
1240 /* See if we can find out some more. */
/* Extended CPUID leaf 0x80000005 reports L1 cache sizes in the top
   bytes of ecx (data) and edx (code). */
1241 cpuid(0x80000000,&aa,&bb,&cc,&dd);
1242 if (aa>=0x80000005) { /* Yes, we can. */
1243 cpuid(0x80000005,&aa,&bb,&cc,&dd);
1244 /* Add L1 data and code cache sizes. */
1245 c->x86_cache_size = (cc>>24)+(dd>>24);
1247 sprintf( c->x86_model_id, "WinChip %s", name );
/*
 * transmeta_model - identify Transmeta (Crusoe) CPUs.
 *
 * Reports CPU and Code Morphing Software revisions from the vendor
 * CPUID leaves (0x8086000x), reads the 64-byte CPU information string,
 * temporarily unmasks hidden feature flags via MSR 0x80860004, and
 * records L1/L2 cache sizes from the standard extended leaves.
 */
1250 static void __init transmeta_model(struct cpuinfo_x86 *c)
1252 unsigned int cap_mask, uk, max, dummy, n, ecx, edx;
1253 unsigned int cms_rev1, cms_rev2;
1254 unsigned int cpu_rev, cpu_freq, cpu_flags;
1255 char cpu_info[65];	/* 64 chars from CPUID + NUL terminator */
1257 get_model_name(c); /* Same as AMD/Cyrix */
1259 /* Print CMS and CPU revision */
1260 cpuid(0x80860000, &max, &dummy, &dummy, &dummy);
1261 if ( max >= 0x80860001 ) {
1262 cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
1263 printk("CPU: Processor revision %u.%u.%u.%u, %u MHz%s%s\n",
1264 (cpu_rev >> 24) & 0xff,
1265 (cpu_rev >> 16) & 0xff,
1266 (cpu_rev >> 8) & 0xff,
1267 cpu_rev & 0xff,
1268 cpu_freq,
1269 (cpu_flags & 1) ? " [recovery]" : "",
1270 (cpu_flags & 2) ? " [longrun]" : "");
1272 if ( max >= 0x80860002 ) {
1273 cpuid(0x80860002, &dummy, &cms_rev1, &cms_rev2, &dummy);
1274 printk("CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
1275 (cms_rev1 >> 24) & 0xff,
1276 (cms_rev1 >> 16) & 0xff,
1277 (cms_rev1 >> 8) & 0xff,
1278 cms_rev1 & 0xff,
1279 cms_rev2);
/* Leaves 3-6 each return 16 bytes of the information string directly
   into the char buffer.  NOTE(review): the (void *) casts store 32-bit
   register values into 4-byte chunks of cpu_info[] — era-typical
   type punning; assumes the struct cpuid helper writes u32s. */
1281 if ( max >= 0x80860006 ) {
1282 cpuid(0x80860003,
1283 (void *)&cpu_info[0],
1284 (void *)&cpu_info[4],
1285 (void *)&cpu_info[8],
1286 (void *)&cpu_info[12]);
1287 cpuid(0x80860004,
1288 (void *)&cpu_info[16],
1289 (void *)&cpu_info[20],
1290 (void *)&cpu_info[24],
1291 (void *)&cpu_info[28]);
1292 cpuid(0x80860005,
1293 (void *)&cpu_info[32],
1294 (void *)&cpu_info[36],
1295 (void *)&cpu_info[40],
1296 (void *)&cpu_info[44]);
1297 cpuid(0x80860006,
1298 (void *)&cpu_info[48],
1299 (void *)&cpu_info[52],
1300 (void *)&cpu_info[56],
1301 (void *)&cpu_info[60]);
1302 cpu_info[64] = '\0';
1303 printk("CPU: %s\n", cpu_info);
/* Unhide possibly hidden flags */
/* MSR 0x80860004 masks reported CPUID feature bits: lift the mask,
   re-read leaf 1, then restore the original mask. */
1307 rdmsr(0x80860004, cap_mask, uk);
1308 wrmsr(0x80860004, ~0, uk);
1309 cpuid(0x00000001, &dummy, &dummy, &dummy, &c->x86_capability);
1310 wrmsr(0x80860004, cap_mask, uk);
1313 /* L1/L2 cache */
1314 cpuid(0x80000000, &n, &dummy, &dummy, &dummy);
1316 if (n >= 0x80000005) {
1317 cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
1318 printk("CPU: L1 I Cache: %dK L1 D Cache: %dK\n",
1319 ecx>>24, edx>>24);
1320 c->x86_cache_size=(ecx>>24)+(edx>>24);
/* An L2 size, when present, replaces the combined L1 figure above. */
1322 if (n >= 0x80000006) {
1323 cpuid(0x80000006, &dummy, &dummy, &ecx, &edx);
1324 printk("CPU: L2 Cache: %dK\n", ecx>>16);
1325 c->x86_cache_size=(ecx>>16);
1330 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
1332 char *v = c->x86_vendor_id;
1334 if (!strcmp(v, "GenuineIntel"))
1335 c->x86_vendor = X86_VENDOR_INTEL;
1336 else if (!strcmp(v, "AuthenticAMD"))
1337 c->x86_vendor = X86_VENDOR_AMD;
1338 else if (!strcmp(v, "CyrixInstead"))
1339 c->x86_vendor = X86_VENDOR_CYRIX;
1340 else if (!strcmp(v, "UMC UMC UMC "))
1341 c->x86_vendor = X86_VENDOR_UMC;
1342 else if (!strcmp(v, "CentaurHauls"))
1343 c->x86_vendor = X86_VENDOR_CENTAUR;
1344 else if (!strcmp(v, "NexGenDriven"))
1345 c->x86_vendor = X86_VENDOR_NEXGEN;
1346 else if (!strcmp(v, "RiseRiseRise"))
1347 c->x86_vendor = X86_VENDOR_RISE;
1348 else if (!strcmp(v, "GenuineTMx86"))
1349 c->x86_vendor = X86_VENDOR_TRANSMETA;
1350 else
1351 c->x86_vendor = X86_VENDOR_UNKNOWN;
/*
 * Table entry tying a (vendor, family) pair to the marketing names of
 * its possible model numbers.
 */
1354 struct cpu_model_info {
1355 int vendor;	/* X86_VENDOR_* constant */
1356 int x86;	/* CPU family (4, 5 or 6) */
1357 char *model_names[16];	/* indexed by CPUID model number; NULL = unknown */
1360 /* Naming convention should be: <Name> [(<Codename>)] */
/* Lookup table consumed by identify_cpu().  Each initializer covers
   model numbers 0-15; any trailing entries left out of an initializer
   are implicitly NULL (unknown model). */
1361 static struct cpu_model_info cpu_models[] __initdata = {
1362 { X86_VENDOR_INTEL, 4,
1363 { "486 DX-25/33", "486 DX-50", "486 SX", "486 DX/2", "486 SL",
1364 "486 SX/2", NULL, "486 DX/2-WB", "486 DX/4", "486 DX/4-WB", NULL,
1365 NULL, NULL, NULL, NULL, NULL }},
1366 { X86_VENDOR_INTEL, 5,
1367 { "Pentium 60/66 A-step", "Pentium 60/66", "Pentium 75 - 200",
1368 "OverDrive PODP5V83", "Pentium MMX", NULL, NULL,
1369 "Mobile Pentium 75 - 200", "Mobile Pentium MMX", NULL, NULL, NULL,
1370 NULL, NULL, NULL, NULL }},
1371 { X86_VENDOR_INTEL, 6,
1372 { "Pentium Pro A-step", "Pentium Pro", NULL, "Pentium II (Klamath)",
1373 NULL, "Pentium II (Deschutes)", "Mobile Pentium II",
1374 "Pentium III (Katmai)", "Pentium III (Coppermine)", NULL,
1375 "Pentium III (Cascades)", NULL, NULL, NULL, NULL }},
1376 { X86_VENDOR_AMD, 4,
1377 { NULL, NULL, NULL, "486 DX/2", NULL, NULL, NULL, "486 DX/2-WB",
1378 "486 DX/4", "486 DX/4-WB", NULL, NULL, NULL, NULL, "Am5x86-WT",
1379 "Am5x86-WB" }},
1380 { X86_VENDOR_AMD, 5,
1381 { "K5/SSA5", "K5",
1382 "K5", "K5", NULL, NULL,
1383 "K6", "K6", "K6-2",
1384 "K6-3", NULL, NULL, NULL, NULL, NULL, NULL }},
1385 { X86_VENDOR_AMD, 6,
1386 { "Athlon", "Athlon",
1387 "Athlon", NULL, "Athlon", NULL,
1388 NULL, NULL, NULL,
1389 NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
1390 { X86_VENDOR_UMC, 4,
1391 { NULL, "U5D", "U5S", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1392 NULL, NULL, NULL, NULL, NULL, NULL }},
1393 { X86_VENDOR_NEXGEN, 5,
1394 { "Nx586", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1395 NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
1396 { X86_VENDOR_RISE, 5,
1397 { "mP6", "mP6", NULL, NULL, NULL, NULL, NULL,
1398 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
1399 { X86_VENDOR_TRANSMETA, 5,
1400 { NULL, NULL, NULL, "Crusoe", NULL, NULL, NULL,
1401 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
1405 * Detect a NexGen CPU running without BIOS hypercode new enough
1406 * to have CPUID. (Thanks to Herbert Oppmann)
/*
 * deep_magic_nexgen_probe - detect a NexGen CPU whose BIOS hypercode is
 * too old to provide CPUID.
 *
 * Performs the 16-bit division 0x5555 / 2 and then tests the flags.
 * Since "movl $0,%eax" does not modify flags, the jnz tests whatever
 * divw left behind — NOTE(review): this relies on NexGen-specific flag
 * behavior of divw to distinguish it from other CPUs; presumably returns
 * 1 on a NexGen and 0 otherwise.  Verify against the original credit
 * (Herbert Oppmann) before touching.
 */
1409 static int deep_magic_nexgen_probe(void)
1411 int ret;
1413 __asm__ __volatile__ (
1414 " movw $0x5555, %%ax\n"
1415 " xorw %%dx,%%dx\n"
1416 " movw $2, %%cx\n"
1417 " divw %%cx\n"
1418 " movl $0, %%eax\n"
1419 " jnz 1f\n"
1420 " movl $1, %%eax\n"
1421 "1:\n"
1422 : "=a" (ret) : : "cx", "dx" );
1423 return ret;
1426 static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
1428 if(c->x86_capability&(1<<18)) {
1429 /* Disable processor serial number */
1430 unsigned long lo,hi;
1431 rdmsr(0x119,lo,hi);
1432 lo |= 0x200000;
1433 wrmsr(0x119,lo,hi);
1434 printk(KERN_INFO "CPU serial number disabled.\n");
1438 void __init identify_cpu(struct cpuinfo_x86 *c)
1440 int i=0;
1441 char *p = NULL;
1443 c->loops_per_sec = loops_per_sec;
1444 c->x86_cache_size = -1;
1446 get_cpu_vendor(c);
1449 switch (c->x86_vendor) {
1451 case X86_VENDOR_UNKNOWN:
1452 if (c->cpuid_level < 0)
1454 /* It may be a nexgen with cpuid disabled.. */
1455 if(deep_magic_nexgen_probe())
1457 strcpy(c->x86_model_id, "Nx586");
1458 c->x86_vendor = X86_VENDOR_NEXGEN;
1460 return;
1462 break;
1464 case X86_VENDOR_CYRIX:
1465 cyrix_model(c);
1466 return;
1468 case X86_VENDOR_AMD:
1469 if (amd_model(c))
1470 return;
1471 break;
1473 case X86_VENDOR_CENTAUR:
1474 centaur_model(c);
1475 return;
1477 case X86_VENDOR_INTEL:
1479 squash_the_stupid_serial_number(c);
1481 if (c->cpuid_level > 1) {
1482 /* supports eax=2 call */
1483 int edx, dummy;
1485 cpuid(2, &dummy, &dummy, &dummy, &edx);
1487 /* We need only the LSB */
1488 edx &= 0xff;
1490 switch (edx) {
1491 case 0x40:
1492 c->x86_cache_size = 0;
1493 break;
1495 case 0x41: /* 4-way 128 */
1496 c->x86_cache_size = 128;
1497 break;
1499 case 0x42: /* 4-way 256 */
1500 case 0x82: /* 8-way 256 */
1501 c->x86_cache_size = 256;
1502 break;
1504 case 0x43: /* 4-way 512 */
1505 c->x86_cache_size = 512;
1506 break;
1508 case 0x44: /* 4-way 1024 */
1509 case 0x84: /* 8-way 1024 */
1510 c->x86_cache_size = 1024;
1511 break;
1513 case 0x45: /* 4-way 2048 */
1514 case 0x85: /* 8-way 2048 */
1515 c->x86_cache_size = 2048;
1516 break;
1518 default:
1519 c->x86_cache_size = 0;
1520 break;
1524 /* Names for the Pentium II/Celeron processors
1525 detectable only by also checking the cache size.
1526 Dixon is NOT a Celeron. */
1527 if (c->x86 == 6) {
1528 switch (c->x86_model) {
1529 case 5:
1530 if (c->x86_cache_size == 0)
1531 p = "Celeron (Covington)";
1532 if (c->x86_cache_size == 256)
1533 p = "Mobile Pentium II (Dixon)";
1534 break;
1536 case 6:
1537 if (c->x86_cache_size == 128)
1538 p = "Celeron (Mendocino)";
1539 break;
1541 case 8:
1542 if (c->x86_cache_size == 128)
1543 p = "Celeron (Coppermine)";
1544 break;
1547 if (p!=NULL)
1548 goto name_decoded;
1550 break;
1552 case X86_VENDOR_TRANSMETA:
1553 transmeta_model(c);
1554 squash_the_stupid_serial_number(c);
1555 return;
1558 /* may be changed in the switch so needs to be after */
1560 if(c->x86_vendor == X86_VENDOR_NEXGEN)
1561 c->x86_cache_size = 256; /* A few had 1Mb.. */
1563 for (i = 0; i < sizeof(cpu_models)/sizeof(struct cpu_model_info); i++) {
1564 if (cpu_models[i].vendor == c->x86_vendor &&
1565 cpu_models[i].x86 == c->x86) {
1566 if (c->x86_model <= 16)
1567 p = cpu_models[i].model_names[c->x86_model];
1571 name_decoded:
1573 if (p) {
1574 strcpy(c->x86_model_id, p);
1575 return;
1578 sprintf(c->x86_model_id, "%02x/%02x", c->x86_vendor, c->x86_model);
1582 * Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
/*
 * dodgy_tsc - early-boot check for Cyrix CPUs whose TSC needs special
 * handling (see the comment above and arch/i386/kernel/time.c).
 * Only probes the vendor; non-Cyrix CPUs return untouched.
 */
1585 void __init dodgy_tsc(void)
1587 get_cpu_vendor(&boot_cpu_data);
1589 if(boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX)
1590 return;
1592 cyrix_model(&boot_cpu_data);
/* Printable vendor names, indexed directly by c->x86_vendor in
   print_cpu_info() — the order must match the X86_VENDOR_* numbering
   (verify against the header if the constants ever change). */
1597 static char *cpu_vendor_names[] __initdata = {
1598 "Intel", "Cyrix", "AMD", "UMC", "NexGen", "Centaur", "Rise", "Transmeta" };
/*
 * print_cpu_info - print a one-line boot-time description of the CPU:
 * "[vendor ]model-or-family[ stepping NN]".  The vendor prefix is
 * suppressed when the model string already begins with it.
 */
1601 void __init print_cpu_info(struct cpuinfo_x86 *c)
1603 char *vendor = NULL;
/* Out-of-range vendor values fall back to the raw CPUID vendor string
   (when CPUID exists at all). */
1605 if (c->x86_vendor < sizeof(cpu_vendor_names)/sizeof(char *))
1606 vendor = cpu_vendor_names[c->x86_vendor];
1607 else if (c->cpuid_level >= 0)
1608 vendor = c->x86_vendor_id;
1610 if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
1611 printk("%s ", vendor);
/* No model name decoded: print the bare family, e.g. "486". */
1613 if (!c->x86_model_id[0])
1614 printk("%d86", c->x86);
1615 else
1616 printk("%s", c->x86_model_id);
1618 if (c->x86_mask || c->cpuid_level>=0)
1619 printk(" stepping %02x\n", c->x86_mask);
1620 else
1621 printk("\n");
1625 * Get CPU information for use by the procfs.
/*
 * get_cpuinfo - format /proc/cpuinfo output for all online CPUs into
 * *buffer.  Returns the number of bytes written.
 */
1628 int get_cpuinfo(char * buffer)
1630 char *p = buffer;
1631 int sep_bug;
1634 * Flags should be entered into the array ONLY if there is no overlap.
1635 * Else a number should be used and then overridden in the case
1636 * statement below. --Jauder <jauderho@carumba.com>
1638 * NOTE: bits 10, 19-22, 26-31 are reserved.
1640 * Data courtesy of http://www.sandpile.org/arch/cpuid.htm
1641 * Thanks to the Greasel!
/* NOTE(review): this array is static and mutated per-vendor in the
   loop below, so names set for one CPU (or one call) persist into
   later iterations/calls — e.g. a Cyrix "cxmmx" at index 24 survives
   until another vendor branch overwrites it.  Harmless on homogeneous
   systems; verify before relying on it for mixed-vendor SMP. */
1643 static char *x86_cap_flags[] = {
1644 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
1645 "cx8", "apic", "10", "sep", "mtrr", "pge", "mca", "cmov",
1646 "16", "pse36", "psn", "19", "20", "21", "22", "mmx",
1647 "24", "xmm", "26", "27", "28", "29", "30", "31"
1649 struct cpuinfo_x86 *c = cpu_data;
1650 int i, n;
1652 for (n = 0; n < NR_CPUS; n++, c++) {
1653 int fpu_exception;
1654 #ifdef CONFIG_SMP
1655 if (!(cpu_online_map & (1<<n)))
1656 continue;
1657 #endif
/* NOTE(review): "%c" with c->x86 + '0' only renders single-digit
   families correctly (fine for the 4/5/6 CPUs this kernel targets). */
1658 p += sprintf(p,"processor\t: %d\n"
1659 "vendor_id\t: %s\n"
1660 "cpu family\t: %c\n"
1661 "model\t\t: %d\n"
1662 "model name\t: %s\n",
1664 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
1665 c->x86 + '0',
1666 c->x86_model,
1667 c->x86_model_id[0] ? c->x86_model_id : "unknown");
1669 if (c->x86_mask || c->cpuid_level >= 0)
1670 p += sprintf(p, "stepping\t: %d\n", c->x86_mask);
1671 else
1672 p += sprintf(p, "stepping\t: unknown\n");
/* Only report a clock rate when the TSC exists to have measured one. */
1674 if (c->x86_capability & X86_FEATURE_TSC) {
1675 p += sprintf(p, "cpu MHz\t\t: %lu.%06lu\n",
1676 cpu_hz / 1000000, (cpu_hz % 1000000));
1679 /* Cache size */
1680 if (c->x86_cache_size >= 0)
1681 p += sprintf(p, "cache size\t: %d KB\n", c->x86_cache_size);
1683 /* Modify the capabilities according to chip type */
1684 switch (c->x86_vendor) {
1686 case X86_VENDOR_CYRIX:
1687 x86_cap_flags[24] = "cxmmx";
1688 break;
1690 case X86_VENDOR_AMD:
1691 if (c->x86 == 5 && c->x86_model == 6)
1692 x86_cap_flags[10] = "sep";
1693 if (c->x86 < 6)
1694 x86_cap_flags[16] = "fcmov";
1695 else
1696 x86_cap_flags[16] = "pat";
1697 x86_cap_flags[22] = "mmxext";
1698 x86_cap_flags[24] = "fxsr";
1699 x86_cap_flags[30] = "3dnowext";
1700 x86_cap_flags[31] = "3dnow";
1701 break;
1703 case X86_VENDOR_INTEL:
1704 x86_cap_flags[16] = "pat";
1705 x86_cap_flags[18] = "pn";
1706 x86_cap_flags[24] = "fxsr";
1707 x86_cap_flags[25] = "xmm";
1708 break;
1710 case X86_VENDOR_CENTAUR:
1711 if (c->x86_model >=8) /* Only Winchip2 and above */
1712 x86_cap_flags[31] = "3dnow";
1713 break;
1715 default:
1716 /* Unknown CPU manufacturer or no special handling needed */
1717 break;
/* Early Pentium II (model < 3, stepping < 3) SYSENTER is unusable. */
1720 sep_bug = c->x86_vendor == X86_VENDOR_INTEL &&
1721 c->x86 == 0x06 &&
1722 c->cpuid_level >= 0 &&
1723 (c->x86_capability & X86_FEATURE_SEP) &&
1724 c->x86_model < 3 &&
1725 c->x86_mask < 3;
1727 /* We use exception 16 if we have hardware math and we've either seen it or the CPU claims it is internal */
1728 fpu_exception = c->hard_math && (ignore_irq13 | (c->x86_capability & X86_FEATURE_FPU));
1729 p += sprintf(p, "fdiv_bug\t: %s\n"
1730 "hlt_bug\t\t: %s\n"
1731 "sep_bug\t\t: %s\n"
1732 "f00f_bug\t: %s\n"
1733 "coma_bug\t: %s\n"
1734 "fpu\t\t: %s\n"
1735 "fpu_exception\t: %s\n"
1736 "cpuid level\t: %d\n"
1737 "wp\t\t: %s\n"
1738 "flags\t\t:",
1739 c->fdiv_bug ? "yes" : "no",
1740 c->hlt_works_ok ? "no" : "yes",
1741 sep_bug ? "yes" : "no",
1742 c->f00f_bug ? "yes" : "no",
1743 c->coma_bug ? "yes" : "no",
1744 c->hard_math ? "yes" : "no",
1745 fpu_exception ? "yes" : "no",
1746 c->cpuid_level,
1747 c->wp_works_ok ? "yes" : "no");
/* One name per set capability bit, using the (possibly vendor-
   adjusted) x86_cap_flags table above. */
1749 for ( i = 0 ; i < 32 ; i++ )
1750 if ( c->x86_capability & (1 << i) )
1751 p += sprintf(p, " %s", x86_cap_flags[i]);
1753 p += sprintf(p, "\nbogomips\t: %lu.%02lu\n\n",
1754 (c->loops_per_sec+2500)/500000,
1755 ((c->loops_per_sec+2500)/5000) % 100);
1757 return p - buffer;
/* "notsc" boot parameter: lets the user disable the TSC at boot when
   the kernel was not built to require one (cpu_init() acts on it). */
1760 #ifndef CONFIG_X86_TSC
1761 static int tsc_disable __initdata = 0;
1763 static int __init tsc_setup(char *str)
1765 tsc_disable = 1;
1766 return 1;
1769 __setup("notsc", tsc_setup);
1770 #endif
/* Bitmap of CPUs that have completed cpu_init(), used to trap
   double initialization. */
1772 static unsigned long cpu_initialized __initdata = 0;
1775 * cpu_init() initializes state that is per-CPU. Some data is already
1776 * initialized (naturally) in the bootstrap process, such as the GDT
1777 * and IDT. We reload them nevertheless, this function acts as a
1778 * 'CPU state barrier', nothing should get across.
1780 void __init cpu_init (void)
1782 int nr = smp_processor_id();
1783 struct tss_struct * t = &init_tss[nr];
/* A second init of the same CPU is a fatal logic error: report and
   spin forever with interrupts enabled. */
1785 if (test_and_set_bit(nr, &cpu_initialized)) {
1786 printk("CPU#%d already initialized!\n", nr);
1787 for (;;) __sti();
1789 printk("Initializing CPU#%d\n", nr);
1791 if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
1792 clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1793 #ifndef CONFIG_X86_TSC
/* Honour the "notsc" boot parameter: drop the capability bit and make
   userspace rdtsc fault via CR4.TSD. */
1794 if (tsc_disable && cpu_has_tsc) {
1795 printk("Disabling TSC...\n");
1796 boot_cpu_data.x86_capability &= ~X86_FEATURE_TSC;
1797 set_in_cr4(X86_CR4_TSD);
1799 #endif
/* Reload descriptor tables on this CPU.
   NOTE(review): lgdt/lidt only READ the operand, so an input "m"
   constraint would be more accurate than "=m" — confirm against the
   gcc version conventions of this kernel before changing. */
1801 __asm__ __volatile__("lgdt %0": "=m" (gdt_descr));
1802 __asm__ __volatile__("lidt %0": "=m" (idt_descr));
1805 * Delete NT
/* Clear EFLAGS.NT (bit 14) so an accidental iret does not task-switch. */
1807 __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
1810 * set up and load the per-CPU TSS and LDT
1812 atomic_inc(&init_mm.mm_count);
1813 current->active_mm = &init_mm;
1814 if(current->mm)
1815 BUG();
1816 enter_lazy_tlb(&init_mm, current, nr);
1818 t->esp0 = current->thread.esp0;
1819 set_tss_desc(nr,t);
/* Clear the busy bit in the TSS descriptor so load_TR() succeeds. */
1820 gdt_table[__TSS(nr)].b &= 0xfffffdff;
1821 load_TR(nr);
1822 load_LDT(&init_mm);
1825 * Clear all 6 debug registers:
1828 #define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );
1830 CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
1832 #undef CD
1835 * Force FPU initialization:
/* Mark the FPU unused and set CR0.TS so the first FP instruction
   traps and reinitializes FPU state lazily. */
1837 current->flags &= ~PF_USEDFPU;
1838 current->used_math = 0;
1839 stts();