/*
 *  linux/arch/x86-64/kernel/setup.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Nov 2001 Dave Jones <davej@suse.de>
 *  Forked from i386 setup code.
 *
 *  $Id$
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>

#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/bootsetup.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
#include <asm/swiotlb.h>
#include <asm/sections.h>
#include <asm/gart-mapping.h>
#include <asm/dmi.h>

/*
 * Machine setup..
 */

struct cpuinfo_x86 boot_cpu_data __read_mostly;

unsigned long mmu_cr4_features;

int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
#ifdef CONFIG_ACPI
extern int __initdata acpi_ht;
extern acpi_interrupt_flags acpi_sci_flags;
int __initdata acpi_force = 0;
#endif

int acpi_numa __initdata;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

/*
 * Setup options
 */
struct screen_info screen_info;
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);
struct e820map e820;

extern int root_mountflags;

char command_line[COMMAND_LINE_SIZE];

struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define STANDARD_IO_RESOURCES \
	(sizeof standard_io_resources / sizeof standard_io_resources[0])

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

struct resource data_resource = {
	.name = "Kernel data",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
struct resource code_resource = {
	.name = "Kernel code",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};

#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)

static struct resource system_rom_resource = {
	.name = "System ROM",
	.start = 0xf0000,
	.end = 0xfffff,
	.flags = IORESOURCE_ROM,
};

static struct resource extension_rom_resource = {
	.name = "Extension ROM",
	.start = 0xe0000,
	.end = 0xeffff,
	.flags = IORESOURCE_ROM,
};

static struct resource adapter_rom_resources[] = {
	{ .name = "Adapter ROM", .start = 0xc8000, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM }
};

#define ADAPTER_ROM_RESOURCES \
	(sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])

static struct resource video_rom_resource = {
	.name = "Video ROM",
	.start = 0xc0000,
	.end = 0xc7fff,
	.flags = IORESOURCE_ROM,
};

static struct resource video_ram_resource = {
	.name = "Video RAM area",
	.start = 0xa0000,
	.end = 0xbffff,
	.flags = IORESOURCE_RAM,
};

#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)

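/*
 * A legacy PC option ROM starts with the bytes 0x55 0xaa (read above as
 * the little-endian word 0xaa55), carries its length at offset 2 in
 * 512-byte units, and is padded so that all of its bytes sum to zero
 * modulo 256 -- the property romchecksum() verifies below.
 */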
static int __init romchecksum(unsigned char *rom, unsigned long length)
{
	unsigned char *p, sum = 0;

	for (p = rom; p < rom + length; p++)
		sum += *p;
	return sum == 0;
}

static void __init probe_roms(void)
{
	unsigned long start, length, upper;
	unsigned char *rom;
	int i;

	/* video rom */
	upper = adapter_rom_resources[0].start;
	for (start = video_rom_resource.start; start < upper; start += 2048) {
		rom = isa_bus_to_virt(start);
		if (!romsignature(rom))
			continue;

		video_rom_resource.start = start;

		/* 0 < length <= 0x7f * 512, historically */
		length = rom[2] * 512;

		/* if checksum okay, trust length byte */
		if (length && romchecksum(rom, length))
			video_rom_resource.end = start + length - 1;

		request_resource(&iomem_resource, &video_rom_resource);
		break;
	}

	start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
	if (start < upper)
		start = upper;

	/* system rom */
	request_resource(&iomem_resource, &system_rom_resource);
	upper = system_rom_resource.start;

	/* check for extension rom (ignore length byte!) */
	rom = isa_bus_to_virt(extension_rom_resource.start);
	if (romsignature(rom)) {
		length = extension_rom_resource.end - extension_rom_resource.start + 1;
		if (romchecksum(rom, length)) {
			request_resource(&iomem_resource, &extension_rom_resource);
			upper = extension_rom_resource.start;
		}
	}

	/* check for adapter roms on 2k boundaries */
	for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
		rom = isa_bus_to_virt(start);
		if (!romsignature(rom))
			continue;

		/* 0 < length <= 0x7f * 512, historically */
		length = rom[2] * 512;

		/* but accept any length that fits if checksum okay */
		if (!length || start + length > upper || !romchecksum(rom, length))
			continue;

		adapter_rom_resources[i].start = start;
		adapter_rom_resources[i].end = start + length - 1;
		request_resource(&iomem_resource, &adapter_rom_resources[i]);

		start = adapter_rom_resources[i++].end & ~2047UL;
	}
}

/* Check for full argument with no trailing characters */
static int fullarg(char *p, char *arg)
{
	int l = strlen(arg);
	return !memcmp(p, arg, l) && (p[l] == 0 || isspace(p[l]));
}

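/*
 * Scan the boot command line once, copying it into command_line while
 * acting on the options that must take effect before the kernel is
 * fully up: memory-map overrides, ACPI and APIC switches, crash-kernel
 * reservations and the like. An option is only matched right after a
 * space, i.e. at the start of a word.
 */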
static __init void parse_cmdline_early (char ** cmdline_p)
{
	char c = ' ', *to = command_line, *from = COMMAND_LINE;
	int len = 0;
	int userdef = 0;

	for (;;) {
		if (c != ' ')
			goto next_char;

#ifdef CONFIG_SMP
		/*
		 * If the BIOS enumerates physical processors before logical,
		 * maxcpus=N at enumeration-time can be used to disable HT.
		 */
		else if (!memcmp(from, "maxcpus=", 8)) {
			extern unsigned int maxcpus;

			maxcpus = simple_strtoul(from + 8, NULL, 0);
		}
#endif
#ifdef CONFIG_ACPI
		/* "acpi=off" disables both ACPI table parsing and interpreter init */
		if (fullarg(from,"acpi=off"))
			disable_acpi();

		if (fullarg(from, "acpi=force")) {
			/* add later when we do DMI horrors: */
			acpi_force = 1;
			acpi_disabled = 0;
		}

		/* acpi=ht just means: do ACPI MADT parsing
		   at bootup, but don't enable the full ACPI interpreter */
		if (fullarg(from, "acpi=ht")) {
			if (!acpi_force)
				disable_acpi();
			acpi_ht = 1;
		}
		else if (fullarg(from, "pci=noacpi"))
			acpi_disable_pci();
		else if (fullarg(from, "acpi=noirq"))
			acpi_noirq_set();

		else if (fullarg(from, "acpi_sci=edge"))
			acpi_sci_flags.trigger = 1;
		else if (fullarg(from, "acpi_sci=level"))
			acpi_sci_flags.trigger = 3;
		else if (fullarg(from, "acpi_sci=high"))
			acpi_sci_flags.polarity = 1;
		else if (fullarg(from, "acpi_sci=low"))
			acpi_sci_flags.polarity = 3;

		/* acpi=strict disables out-of-spec workarounds */
		else if (fullarg(from, "acpi=strict")) {
			acpi_strict = 1;
		}
#ifdef CONFIG_X86_IO_APIC
		else if (fullarg(from, "acpi_skip_timer_override"))
			acpi_skip_timer_override = 1;
#endif
#endif

		if (fullarg(from, "disable_timer_pin_1"))
			disable_timer_pin_1 = 1;
		if (fullarg(from, "enable_timer_pin_1"))
			disable_timer_pin_1 = -1;

		if (fullarg(from, "nolapic") || fullarg(from, "disableapic")) {
			clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
			disable_apic = 1;
		}

		if (fullarg(from, "noapic"))
			skip_ioapic_setup = 1;

		if (fullarg(from,"apic")) {
			skip_ioapic_setup = 0;
			ioapic_force = 1;
		}

		if (!memcmp(from, "mem=", 4))
			parse_memopt(from+4, &from);

		if (!memcmp(from, "memmap=", 7)) {
			/* exactmap option is for user-defined memory */
			if (!memcmp(from+7, "exactmap", 8)) {
#ifdef CONFIG_CRASH_DUMP
				/* If we are doing a crash dump, we
				 * still need to know the real mem
				 * size before original memory map is
				 * reset.
				 */
				saved_max_pfn = e820_end_of_ram();
#endif
				from += 8+7;
				end_pfn_map = 0;
				e820.nr_map = 0;
				userdef = 1;
			}
			else {
				parse_memmapopt(from+7, &from);
				userdef = 1;
			}
		}

#ifdef CONFIG_NUMA
		if (!memcmp(from, "numa=", 5))
			numa_setup(from+5);
#endif

		if (!memcmp(from,"iommu=",6)) {
			iommu_setup(from+6);
		}

		if (fullarg(from,"oops=panic"))
			panic_on_oops = 1;

		if (!memcmp(from, "noexec=", 7))
			nonx_setup(from + 7);

#ifdef CONFIG_KEXEC
		/* crashkernel=size@addr specifies the location to reserve for
		 * a crash kernel. By reserving this memory we guarantee
		 * that linux never sets it up as a DMA target.
		 * Useful for holding code to do something appropriate
		 * after a kernel panic.
		 */
		else if (!memcmp(from, "crashkernel=", 12)) {
			unsigned long size, base;
			size = memparse(from+12, &from);
			if (*from == '@') {
				base = memparse(from+1, &from);
				/* FIXME: Do I want a sanity check
				 * to validate the memory range?
				 */
				crashk_res.start = base;
				crashk_res.end = base + size - 1;
			}
		}
#endif

#ifdef CONFIG_PROC_VMCORE
		/* elfcorehdr= specifies the location of elf core header
		 * stored by the crashed kernel. This option will be passed
		 * by kexec loader to the capture kernel.
		 */
		else if(!memcmp(from, "elfcorehdr=", 11))
			elfcorehdr_addr = memparse(from+11, &from);
#endif

#ifdef CONFIG_HOTPLUG_CPU
		else if (!memcmp(from, "additional_cpus=", 16))
			setup_additional_cpus(from+16);
#endif

	next_char:
		c = *(from++);
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*(to++) = c;
	}
	if (userdef) {
		printk(KERN_INFO "user-defined physical RAM map:\n");
		e820_print_map("user");
	}
	*to = '\0';
	*cmdline_p = command_line;
}

#ifndef CONFIG_NUMA
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n",bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
	reserve_bootmem(bootmap, bootmap_size);
}
#endif

/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nk8nops: "
    K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
    K8_NOP7 K8_NOP8);

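/*
 * k8nops is the concatenation of the 1- through 8-byte K8 NOP
 * sequences, so the k-byte sequence starts at offset 1+2+...+(k-1).
 * The table below uses that layout: k8_nops[k] points at a k-byte NOP.
 */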
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};

extern char __vsyscall_0;

/* Replace instructions with better alternatives for this CPU type.

   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   In this case boot with "noreplacement". */
void apply_alternatives(void *start, void *end)
{
	struct alt_instr *a;
	int diff, i, k;
	for (a = start; (void *)a < end; a++) {
		u8 *instr;

		if (!boot_cpu_has(a->cpuid))
			continue;

		BUG_ON(a->replacementlen > a->instrlen);
		instr = a->instr;
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END)
			instr = __va(instr - (u8 *)VSYSCALL_START + (u8 *)__pa_symbol(&__vsyscall_0));
		__inline_memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;

		/* Pad the rest with nops */
		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
			k = diff;
			if (k > ASM_NOP_MAX)
				k = ASM_NOP_MAX;
			__inline_memcpy(instr + i, k8_nops[k], k);
		}
	}
}

static int no_replacement __initdata = 0;

void __init alternative_instructions(void)
{
	extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
	if (no_replacement)
		return;
	apply_alternatives(__alt_instructions, __alt_instructions_end);
}

static int __init noreplacement_setup(char *s)
{
	no_replacement = 1;
	return 1;
}

__setup("noreplacement", noreplacement_setup);

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
	edd.mbr_signature_nr = EDD_MBR_SIG_NR;
	edd.edd_info_nr = EDD_NR;
}
#else
static inline void copy_edd(void)
{
}
#endif

#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

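/*
 * The word at 0x40E in the BIOS Data Area holds the real-mode segment
 * of the Extended BIOS Data Area; shifting it left by 4 gives the
 * physical address. The EBDA conventionally begins with its own size
 * in KiB, which is what discover_ebda() reads (and then clamps) so
 * that setup_arch() can reserve the region.
 */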
static void discover_ebda(void)
{
	/*
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E
	 */
	ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
	ebda_addr <<= 4;

	ebda_size = *(unsigned short *)(unsigned long)ebda_addr;

	/* Round EBDA up to pages */
	if (ebda_size == 0)
		ebda_size = 1;
	ebda_size <<= 10;
	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
	if (ebda_size > 64*1024)
		ebda_size = 64*1024;
}

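/*
 * setup_arch() is the architecture-dependent core of early boot: it
 * copies the parameters handed over by the boot loader, builds the
 * e820-based memory map and bootmem allocator, reserves the regions
 * that must never be handed out (kernel image, page 0, EBDA, SMP
 * trampoline, initrd, crash kernel), and brings up ACPI, NUMA and the
 * resource tree.
 */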
void __init setup_arch(char **cmdline_p)
{
	unsigned long kernel_end;

	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
	screen_info = SCREEN_INFO;
	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();
	copy_edd();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;

	parse_cmdline_early(cmdline_p);

	early_identify_cpu(&boot_cpu_data);

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;		/* for pfn_valid */

	check_efer();

	discover_ebda();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

	dmi_scan_machine();

	zap_low_mappings(0);

#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	kernel_end = round_up(__pa_symbol(&_end), PAGE_SIZE);
	reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);

#ifdef CONFIG_SMP
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		}
		else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			    (unsigned long)(INITRD_START + INITRD_SIZE),
			    (unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end) {
		reserve_bootmem_generic(crashk_res.start,
			crashk_res.end - crashk_res.start + 1);
	}
#endif

	paging_init();

	check_ioapic();

	/*
	 * set this early, so we don't allocate cpu0
	 * if MADT list doesn't list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
#endif

	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
	probe_roms();
	e820_reserve_resources();

	request_resource(&iomem_resource, &video_ram_resource);

	{
		unsigned i;
		/* request I/O space for devices used on all i[345]86 PCs */
		for (i = 0; i < STANDARD_IO_RESOURCES; i++)
			request_resource(&ioport_resource, &standard_io_resources[i]);
	}

	e820_setup_gap();

#ifdef CONFIG_GART_IOMMU
	iommu_hole_init();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

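/*
 * CPUID leaves 0x80000002-0x80000004 each return 16 bytes of the
 * 48-byte processor brand string in EAX:EBX:ECX:EDX, which is why
 * x86_model_id is filled below through an unsigned int window.
 */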
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}

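/*
 * The extended leaves report cache geometry directly: for leaf
 * 0x80000005, ECX and EDX describe the L1 data and instruction caches
 * (size in KB in the top byte, line size in the bottom byte); for leaf
 * 0x80000006 the upper half of ECX is the unified L2 size in KB.
 */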
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
			c->x86_cache_size, ecx & 0xFF);
	}

	if (n >= 0x80000007)
		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}

#ifdef CONFIG_NUMA
static int nearby_node(int apicid)
{
	int i;
	for (i = apicid - 1; i >= 0; i--) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();
	unsigned bits;
#ifdef CONFIG_NUMA
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
#endif

	bits = 0;
	while ((1 << bits) < c->x86_max_cores)
		bits++;

	/* Low order bits define the core id (index of core in socket) */
	cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	phys_proc_id[cpu] = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
	node = phys_proc_id[cpu];
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		   In that case try picking one from a nearby CPU
		   - The APIC IDs differ from the HyperTransport node IDs
		   which the K8 northbridge parsing fills in.
		   Assume they are all increased by a constant offset,
		   but in the same order as the HT nodeids.
		   If that doesn't result in a usable node fall back to the
		   path for the previous case. */
		int ht_nodeid = apicid - (phys_proc_id[0] << bits);
		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x(%d) -> Node %d -> Core %d\n",
			cpu, apicid, c->x86_max_cores, node, cpu_core_id[cpu]);
#endif
#endif
}

static int __init init_amd(struct cpuinfo_x86 *c)
{
	int r;
	unsigned level;

#ifdef CONFIG_SMP
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}
#endif

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

	r = get_model_name(c);
	if (!r) {
		switch (c->x86) {
		case 15:
			/* Should distinguish Models here, but this is only
			   a fallback anyway. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

	if (c->extended_cpuid_level >= 0x80000008) {
		c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;

		amd_detect_cmp(c);
	}

	return r;
}

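/*
 * With Hyper-Threading, CPUID leaf 1 EBX[23:16] reports the number of
 * logical processors per physical package. Dividing that by the core
 * count gives the siblings per core, and masking the APIC ID by the
 * sibling and core field widths (via phys_pkg_id) recovers the
 * physical package and core IDs shown in /proc/cpuinfo.
 */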
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	int cpu = smp_processor_id();

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		phys_proc_id[cpu] = phys_pkg_id(index_msb);

		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       phys_proc_id[cpu]);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		cpu_core_id[cpu] = phys_pkg_id(index_msb) &
					       ((1 << core_bits) - 1);

		if (c->x86_max_cores > 1)
			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
			       cpu_core_id[cpu]);
	}
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax;

	if (c->cpuid_level < 4)
		return 1;

	__asm__("cpuid"
		: "=a" (eax)
		: "0" (4), "c" (0)
		: "bx", "dx");
	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}

static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[hard_smp_processor_id()];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	if (acpi_numa > 0)
		printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node);
#endif
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
	set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};

/* Do some early cpuid on the boot CPU to get the parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
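	/*
	 * Leaf 1 EAX packs the CPU signature: stepping in bits 3:0, model
	 * in 7:4, family in 11:8, extended model in 19:16 and extended
	 * family in 27:20. The extended family is added in only for base
	 * family 0xf, and the extended model is folded in for family 0x6
	 * and up, as the fixups below implement.
	 */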
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

#ifdef CONFIG_SMP
	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;
	u32 xlvl;

	early_identify_cpu(c);

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0 ; i < NCAPINTS ; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	if (c == &boot_cpu_data)
		mtrr_bp_init();
	else
		mtrr_ap_init();
#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}

/*
 *	Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned.  Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static char *x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
		"constant_tsc", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
		"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	static char *x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",  /* frequency id control */
		"vid",  /* voltage id control */
		"ttp",  /* thermal trip */
		"tm",
		"stc",
		NULL,
		/* nothing */	/* constant_tsc - moved to flags */
	};

#ifdef CONFIG_SMP
	if (!cpu_online(c-cpu_data))
		return 0;
#endif

	seq_printf(m,"processor\t: %u\n"
		     "vendor_id\t: %s\n"
		     "cpu family\t: %d\n"
		     "model\t\t: %d\n"
		     "model name\t: %s\n",
		     (unsigned)(c-cpu_data),
		     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		     c->x86,
		     (int)c->x86_model,
		     c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	if (cpu_has(c,X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			     freq / 1000, (freq % 1000));
	}

	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
	if (smp_num_siblings * c->x86_max_cores > 1) {
		int cpu = c - cpu_data;
		seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
		seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
		seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
#endif

	seq_printf(m,
		"fpu\t\t: yes\n"
		"fpu_exception\t: yes\n"
		"cpuid level\t: %d\n"
		"wp\t\t: yes\n"
		"flags\t\t:",
		   c->cpuid_level);

	{
		int i;
		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
			if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
				seq_printf(m, " %s", x86_cap_flags[i]);
	}

	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);

	seq_printf(m, "power management:");
	{
		unsigned i;
		for (i = 0; i < 32; i++)
			if (c->x86_power & (1 << i)) {
				if (i < ARRAY_SIZE(x86_power_flags) &&
				    x86_power_flags[i])
					seq_printf(m, "%s%s",
						x86_power_flags[i][0]?" ":"",
						x86_power_flags[i]);
				else
					seq_printf(m, " [%d]", i);
			}
	}

	seq_printf(m, "\n\n");

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};

#ifdef CONFIG_INPUT_PCSPKR
#include <linux/platform_device.h>
static __init int add_pcspkr(void)
{
	struct platform_device *pd;
	int ret;

	pd = platform_device_alloc("pcspkr", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);

	return ret;
}
device_initcall(add_pcspkr);
#endif