[PATCH] APIC fixes for x86-64
[linux-2.6/history.git] arch/x86_64/kernel/setup.c
blob 2a5f5515d3ef9f617649b7864d04034395d00a38
/*
 *  linux/arch/x86-64/kernel/setup.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Nov 2001 Dave Jones <davej@suse.de>
 *  Forked from i386 setup code.
 *
 *  $Id$
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/bootsetup.h>
#include <asm/smp.h>
#include <asm/proto.h>
/*
 * Machine setup..
 */

struct cpuinfo_x86 boot_cpu_data;

unsigned long mmu_cr4_features;
EXPORT_SYMBOL_GPL(mmu_cr4_features);

int acpi_disabled = 0;
int acpi_ht = 0;

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0x10000000;

unsigned long saved_video_mode;
/*
 * Setup options
 */
struct drive_info_struct { char dummy[32]; } drive_info;
struct screen_info screen_info;
struct sys_desc_table_struct {
        unsigned short length;
        unsigned char table[0];
};

struct edid_info edid_info;
struct e820map e820;

unsigned char aux_device_present;

extern int root_mountflags;
extern char _text, _etext, _edata, _end;

char command_line[COMMAND_LINE_SIZE];
char saved_command_line[COMMAND_LINE_SIZE];
struct resource standard_io_resources[] = {
        { "dma1", 0x00, 0x1f, IORESOURCE_BUSY },
        { "pic1", 0x20, 0x21, IORESOURCE_BUSY },
        { "timer", 0x40, 0x5f, IORESOURCE_BUSY },
        { "keyboard", 0x60, 0x6f, IORESOURCE_BUSY },
        { "dma page reg", 0x80, 0x8f, IORESOURCE_BUSY },
        { "pic2", 0xa0, 0xa1, IORESOURCE_BUSY },
        { "dma2", 0xc0, 0xdf, IORESOURCE_BUSY },
        { "fpu", 0xf0, 0xff, IORESOURCE_BUSY }
};

#define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource))
struct resource code_resource = { "Kernel code", 0x100000, 0 };
struct resource data_resource = { "Kernel data", 0, 0 };
struct resource vram_resource = { "Video RAM area", 0xa0000, 0xbffff, IORESOURCE_BUSY };

/* System ROM resources */
#define MAXROMS 6
static struct resource rom_resources[MAXROMS] = {
        { "System ROM", 0xF0000, 0xFFFFF, IORESOURCE_BUSY },
        { "Video ROM", 0xc0000, 0xc7fff, IORESOURCE_BUSY }
};

#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
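/*
 * Scan the legacy ISA ROM area for the 0xaa55 signature and register any
 * ROMs found with the iomem resource tree, so they show up in /proc/iomem
 * and are not handed out to other users of that address space.
 */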
static void __init probe_roms(void)
{
        int roms = 1;
        unsigned long base;
        unsigned char *romstart;

        request_resource(&iomem_resource, rom_resources+0);

        /* Video ROM is standard at C000:0000 - C7FF:0000, check signature */
        for (base = 0xC0000; base < 0xE0000; base += 2048) {
                romstart = isa_bus_to_virt(base);
                if (!romsignature(romstart))
                        continue;
                request_resource(&iomem_resource, rom_resources + roms);
                roms++;
                break;
        }

        /* Extension roms at C800:0000 - DFFF:0000 */
        for (base = 0xC8000; base < 0xE0000; base += 2048) {
                unsigned long length;

                romstart = isa_bus_to_virt(base);
                if (!romsignature(romstart))
                        continue;
                length = romstart[2] * 512;
                if (length) {
                        unsigned int i;
                        unsigned char chksum;

                        chksum = 0;
                        for (i = 0; i < length; i++)
                                chksum += romstart[i];

                        /* Good checksum? */
                        if (!chksum) {
                                rom_resources[roms].start = base;
                                rom_resources[roms].end = base + length - 1;
                                rom_resources[roms].name = "Extension ROM";
                                rom_resources[roms].flags = IORESOURCE_BUSY;

                                request_resource(&iomem_resource, rom_resources + roms);
                                roms++;
                                if (roms >= MAXROMS)
                                        return;
                        }
                }
        }

        /* Final check for motherboard extension rom at E000:0000 */
        base = 0xE0000;
        romstart = isa_bus_to_virt(base);

        if (romsignature(romstart)) {
                rom_resources[roms].start = base;
                rom_resources[roms].end = base + 65535;
                rom_resources[roms].name = "Extension ROM";
                rom_resources[roms].flags = IORESOURCE_BUSY;

                request_resource(&iomem_resource, rom_resources + roms);
        }
}
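/*
 * Early command line parsing, before the regular __setup() machinery runs.
 * Only options that must be known very early are handled here:
 * acpi=off/force/ht, nolapic/disableapic, noapic, apic, mem=, and (when
 * configured) numa= and iommu=.
 */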
static __init void parse_cmdline_early (char ** cmdline_p)
{
        char c = ' ', *to = command_line, *from = COMMAND_LINE;
        int len = 0;

        /* Save unparsed command line copy for /proc/cmdline */
        memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
        saved_command_line[COMMAND_LINE_SIZE-1] = '\0';

        for (;;) {
                if (c != ' ')
                        goto next_char;

                /* "acpi=off" disables both ACPI table parsing and interpreter init */
                if (!memcmp(from, "acpi=off", 8))
                        acpi_disabled = 1;

                if (!memcmp(from, "acpi=force", 10)) {
                        /* add later when we do DMI horrors: */
                        /* acpi_force = 1; */
                        acpi_disabled = 0;
                }

                /* acpi=ht just means: do ACPI MADT parsing
                   at bootup, but don't enable the full ACPI interpreter */
                if (!memcmp(from, "acpi=ht", 7)) {
                        acpi_ht = 1;
                }

                if (!memcmp(from, "nolapic", 7) ||
                    !memcmp(from, "disableapic", 11))
                        disable_apic = 1;

                if (!memcmp(from, "noapic", 6))
                        skip_ioapic_setup = 1;
                if (!memcmp(from, "apic", 4)) {
                        skip_ioapic_setup = 0;
                        ioapic_force = 1;
                }
                if (!memcmp(from, "mem=", 4))
                        parse_memopt(from+4, &from);

#ifdef CONFIG_DISCONTIGMEM
                if (!memcmp(from, "numa=", 5))
                        numa_setup(from+5);
#endif

#ifdef CONFIG_GART_IOMMU
                if (!memcmp(from,"iommu=",6)) {
                        iommu_setup(from+6);
                }
#endif

        next_char:
                c = *(from++);
                if (!c)
                        break;
                if (COMMAND_LINE_SIZE <= ++len)
                        break;
                *(to++) = c;
        }
        *to = '\0';
        *cmdline_p = command_line;
}
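/*
 * Flat (non-NUMA) bootmem setup: place the bootmem bitmap in a free e820
 * area below end_pfn, free all usable e820 RAM into the allocator, and
 * then reserve the bitmap itself.
 */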
#ifndef CONFIG_DISCONTIGMEM
static void __init contig_initmem_init(void)
{
        unsigned long bootmap_size, bootmap;
        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n",bootmap_size);
        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
        e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT);
        reserve_bootmem(bootmap, bootmap_size);
}
#endif
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nk8nops: "
    K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
    K8_NOP7 K8_NOP8);

extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
        NULL,
        k8nops,
        k8nops + 1,
        k8nops + 1 + 2,
        k8nops + 1 + 2 + 3,
        k8nops + 1 + 2 + 3 + 4,
        k8nops + 1 + 2 + 3 + 4 + 5,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
/* Replace instructions with better alternatives for this CPU type.

   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   In this case boot with "noreplacement". */
void apply_alternatives(void *start, void *end)
{
        struct alt_instr *a;
        int diff, i, k;
        for (a = start; (void *)a < end; a++) {
                if (!boot_cpu_has(a->cpuid))
                        continue;

                BUG_ON(a->replacementlen > a->instrlen);
                __inline_memcpy(a->instr, a->replacement, a->replacementlen);
                diff = a->instrlen - a->replacementlen;

                /* Pad the rest with nops */
                for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
                        k = diff;
                        if (k > ASM_NOP_MAX)
                                k = ASM_NOP_MAX;
                        __inline_memcpy(a->instr + i, k8_nops[k], k);
                }
        }
}
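/*
 * alternative_instructions() below is the boot-time entry point for the
 * patching above: it walks the whole __alt_instructions section unless
 * "noreplacement" was given on the command line.
 */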
static int no_replacement __initdata = 0;

void __init alternative_instructions(void)
{
        extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
        if (no_replacement)
                return;
        apply_alternatives(__alt_instructions, __alt_instructions_end);
}

static int __init noreplacement_setup(char *s)
{
        no_replacement = 1;
        return 0;
}

__setup("noreplacement", noreplacement_setup);
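/*
 * setup_arch() is the architecture-specific part of early kernel
 * initialization, called once from start_kernel(): it copies the boot
 * parameters, sets up the e820 memory map and the bootmem allocator,
 * parses the early command line, reserves special pages, and registers
 * the standard I/O and memory resources.
 */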
void __init setup_arch(char **cmdline_p)
{
        unsigned long low_mem_size;

        ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
        drive_info = DRIVE_INFO;
        screen_info = SCREEN_INFO;
        edid_info = EDID_INFO;
        aux_device_present = AUX_DEVICE_INFO;
        saved_video_mode = SAVED_VIDEO_MODE;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
        setup_memory_region();

        if (!MOUNT_ROOT_RDONLY)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext)-1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata)-1;

        parse_cmdline_early(cmdline_p);
        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        end_pfn = e820_end_of_ram();

        init_memory_mapping();

#ifdef CONFIG_DISCONTIGMEM
        numa_initmem_init(0, end_pfn);
#else
        contig_initmem_init();
#endif

        /* Reserve direct mapping */
        reserve_bootmem_generic(table_start << PAGE_SHIFT,
                                (table_end - table_start) << PAGE_SHIFT);

        /* reserve kernel */
        unsigned long kernel_end;
        kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
        reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

        /*
         * reserve physical page 0 - it's a special BIOS page on many boxes,
         * enabling clean reboots, SMP operation, laptop functions.
         */
        reserve_bootmem_generic(0, PAGE_SIZE);
#ifdef CONFIG_SMP
        /*
         * But first pinch a few for the stack/trampoline stuff
         * FIXME: Don't need the extra page at 4K, but need to fix
         * trampoline before removing it. (see the GDT stuff)
         */
        reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

        /* Reserve SMP trampoline */
        reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
        /*
         * Reserve low memory region for sleep support.
         */
        acpi_reserve_bootmem();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
        if (LOADER_TYPE && INITRD_START) {
                if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
                        reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
                        initrd_start =
                                INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
                        initrd_end = initrd_start+INITRD_SIZE;
                }
                else {
                        printk(KERN_ERR "initrd extends beyond end of memory "
                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                               (unsigned long)(INITRD_START + INITRD_SIZE),
                               (unsigned long)(end_pfn << PAGE_SHIFT));
                        initrd_start = 0;
                }
        }
#endif
        paging_init();

#ifndef CONFIG_SMP
        /* Temporary hack: disable the IO-APIC for UP Nvidia and
           This is until we sort out the ACPI problems. */
        if (!acpi_disabled)
                check_ioapic();
#endif
#ifdef CONFIG_ACPI_BOOT
        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Must do this after paging_init (due to reliance on fixmap, and thus
         * the bootmem allocator) but before get_smp_config (to allow parsing
         * of MADT).
         */
        if (!acpi_disabled)
                acpi_boot_init();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
        init_apic_mappings();
#endif

        /*
         * Request address space for all standard RAM and ROM resources
         * and also for regions reported as reserved by the e820.
         */
        probe_roms();
        e820_reserve_resources();

        request_resource(&iomem_resource, &vram_resource);

        {
                unsigned i;
                /* request I/O space for devices used on all i[345]86 PCs */
                for (i = 0; i < STANDARD_IO_RESOURCES; i++)
                        request_resource(&ioport_resource, standard_io_resources+i);
        }
        /* Will likely break when you have unassigned resources with more
           than 4GB memory and bridges that don't support more than 4GB.
           Doing it properly would require using pci_alloc_consistent
           in this case. */
        low_mem_size = ((end_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
        if (low_mem_size > pci_mem_start)
                pci_mem_start = low_mem_size;

#ifdef CONFIG_GART_IOMMU
        iommu_hole_init();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
}
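/*
 * Read the CPU brand string from the extended CPUID leaves
 * 0x80000002-0x80000004 into c->x86_model_id.  Returns 0 if the CPU
 * does not implement those leaves.
 */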
static int __init get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;

        if (cpuid_eax(0x80000000) < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;
        return 1;
}
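/*
 * Read L1/L2 cache and TLB geometry, power-management flags and the
 * physical/virtual address widths from the AMD extended CPUID leaves
 * 0x80000005-0x80000008 and record them in *c, printing the cache
 * geometry along the way.
 */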
static void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, eax, ebx, ecx, edx;

        n = cpuid_eax(0x80000000);

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size=(ecx>>24)+(edx>>24);
                /* DTLB and ITLB together, but only 4K */
                c->x86_tlbsize = ((ebx>>16)&0xff) + (ebx&0xff);
        }

        if (n >= 0x80000006) {
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                ecx = cpuid_ecx(0x80000006);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                       c->x86_cache_size, ecx & 0xFF);
        }

        if (n >= 0x80000007)
                cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
        if (n >= 0x80000008) {
                cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}
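/*
 * AMD-specific setup: clear the non-standard 3DNow bit, flag C-stepping
 * K8 parts, and fall back to a generic "Hammer" model name when the
 * brand string is unavailable.
 */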
static int __init init_amd(struct cpuinfo_x86 *c)
{
        int r;
        int level;

        /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
           3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, &c->x86_capability);

        /* C-stepping K8? */
        level = cpuid_eax(1);
        if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
                set_bit(X86_FEATURE_K8_C, &c->x86_capability);

        r = get_model_name(c);
        if (!r) {
                switch (c->x86) {
                case 15:
                        /* Should distinguish Models here, but this is only
                           a fallback anyways. */
                        strcpy(c->x86_model_id, "Hammer");
                        break;
                }
        }
        display_cacheinfo(c);
        return r;
}
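/*
 * Only AMD is identified by vendor string at this point; anything else
 * falls back to X86_VENDOR_UNKNOWN.
 */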
void __init get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;

        if (!strcmp(v, "AuthenticAMD"))
                c->x86_vendor = X86_VENDOR_AMD;
        else
                c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
        int vendor;
        int family;
        char *model_names[16];
};
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
        int junk, i;
        u32 xlvl, tfms;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0';     /* Unset */
        c->x86_model_id[0] = '\0';      /* Unset */
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        /* Get vendor name */
        cpuid(0x00000000, &c->cpuid_level,
              (int *)&c->x86_vendor_id[0],
              (int *)&c->x86_vendor_id[8],
              (int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);
        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */
        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                __u32 misc;
                cpuid(0x00000001, &tfms, &misc, &junk,
                      &c->x86_capability[0]);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf) {
                        c->x86 += (tfms >> 20) & 0xff;
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                }
                if (c->x86_capability[0] & (1<<19))
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
                /* Have CPUID level 0 only - unheard of */
                c->x86 = 4;
        }
        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        if ( (xlvl & 0xffff0000) == 0x80000000 ) {
                if ( xlvl >= 0x80000001 )
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                if ( xlvl >= 0x80000004 )
                        get_model_name(c); /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ( (xlvl & 0xffff0000) == 0x80860000 ) {
                if ( xlvl >= 0x80860001 )
                        c->x86_capability[2] = cpuid_edx(0x80860001);
        }
        /*
         * Vendor-specific initialization.  In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        switch ( c->x86_vendor ) {

        case X86_VENDOR_AMD:
                init_amd(c);
                break;

        case X86_VENDOR_UNKNOWN:
        default:
                /* Not much we can do here... */
                break;
        }

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if ( c != &boot_cpu_data ) {
                /* AND the already accumulated flags with these */
                for ( i = 0 ; i < NCAPINTS ; i++ )
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }
}
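/* Print the model name and stepping of a CPU on the console. */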
void __init print_cpu_info(struct cpuinfo_x86 *c)
{
        if (c->x86_model_id[0])
                printk("%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(" stepping %02x\n", c->x86_mask);
        else
                printk("\n");
}
/*
 *      Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
        struct cpuinfo_x86 *c = v;

        /*
         * These flag bits must match the definitions in <asm/cpufeature.h>.
         * NULL means this bit is undefined or reserved; either way it doesn't
         * have meaning as far as Linux is concerned. Note that it's important
         * to realize there is a difference between this table and CPUID -- if
         * applications want to get the raw CPUID data, they should access
         * /dev/cpu/<cpu_nr>/cpuid instead.
         */
        static char *x86_cap_flags[] = {
                /* Intel-defined */
                "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
                "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
                "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
                "fxsr", "sse", "sse2", "ss", NULL, "tm", "ia64", NULL,

                /* AMD-defined */
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
                NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",

                /* Transmeta-defined */
                "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Other (Linux-defined) */
                "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        };
        static char *x86_power_flags[] = {
                "ts",   /* temperature sensor */
                "fid",  /* frequency id control */
                "vid",  /* voltage id control */
                "ttp",  /* thermal trip */
        };
#ifdef CONFIG_SMP
        if (!cpu_online(c-cpu_data))
                return 0;
#endif

        seq_printf(m,"processor\t: %u\n"
                     "vendor_id\t: %s\n"
                     "cpu family\t: %d\n"
                     "model\t\t: %d\n"
                     "model name\t: %s\n",
                   (unsigned)(c-cpu_data),
                   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
                   c->x86,
                   (int)c->x86_model,
                   c->x86_model_id[0] ? c->x86_model_id : "unknown");

        if (c->x86_mask || c->cpuid_level >= 0)
                seq_printf(m, "stepping\t: %d\n", c->x86_mask);
        else
                seq_printf(m, "stepping\t: unknown\n");

        if (cpu_has(c,X86_FEATURE_TSC)) {
                seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
                           cpu_khz / 1000, (cpu_khz % 1000));
        }

        /* Cache size */
        if (c->x86_cache_size >= 0)
                seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
        seq_printf(m,
                   "fpu\t\t: yes\n"
                   "fpu_exception\t: yes\n"
                   "cpuid level\t: %d\n"
                   "wp\t\t: yes\n"
                   "flags\t\t:",
                   c->cpuid_level);

        {
                int i;
                for ( i = 0 ; i < 32*NCAPINTS ; i++ )
                        if ( test_bit(i, &c->x86_capability) &&
                             x86_cap_flags[i] != NULL )
                                seq_printf(m, " %s", x86_cap_flags[i]);
        }

        seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
                   c->loops_per_jiffy/(500000/HZ),
                   (c->loops_per_jiffy/(5000/HZ)) % 100);

        if (c->x86_tlbsize > 0)
                seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
        seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);

        seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
                   c->x86_phys_bits, c->x86_virt_bits);

        seq_printf(m, "power management:");
        {
                unsigned i;
                for (i = 0; i < 32; i++)
                        if (c->x86_power & (1 << i)) {
                                if (i < ARRAY_SIZE(x86_power_flags))
                                        seq_printf(m, " %s", x86_power_flags[i]);
                                else
                                        seq_printf(m, " [%d]", i);
                        }
        }

        seq_printf(m, "\n\n");

        return 0;
}
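/*
 * seq_file iterator for /proc/cpuinfo: walk the cpu_data[] array one
 * entry per possible CPU; show_cpuinfo() skips offline CPUs itself.
 */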
static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = show_cpuinfo,
};