x86: move mp_bus_id_to_local to numa.c
[linux-2.6/mini2440.git] arch/x86/kernel/mpparse_32.c
blob ae385b4278416304255fb7764b60409bbd57d52a
1 /*
2 * Intel Multiprocessor Specification 1.1 and 1.4
3 * compliant MP-table parsing routines.
5 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
6 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
8 * Fixes
9 * Erich Boleyn : MP v1.4 and additional changes.
10 * Alan Cox : Added EBDA scanning
11 * Ingo Molnar : various cleanups and rewrites
12 * Maciej W. Rozycki: Bits for default MP configurations
13 * Paul Diefenbaugh: Added full ACPI support
16 #include <linux/mm.h>
17 #include <linux/init.h>
18 #include <linux/acpi.h>
19 #include <linux/delay.h>
20 #include <linux/bootmem.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/mc146818rtc.h>
23 #include <linux/bitops.h>
25 #include <asm/smp.h>
26 #include <asm/acpi.h>
27 #include <asm/mtrr.h>
28 #include <asm/mpspec.h>
29 #include <asm/io_apic.h>
31 #include <mach_apic.h>
32 #include <mach_apicdef.h>
33 #include <mach_mpparse.h>
34 #include <bios_ebda.h>
36 /* Have we found an MP table */
37 int smp_found_config;
38 unsigned int __cpuinitdata maxcpus = NR_CPUS;
41 * Various Linux-internal data structures created from the
42 * MP-table.
44 int apic_version [MAX_APICS];
45 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
46 int mp_bus_id_to_type [MAX_MP_BUSSES];
47 #endif
48 DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
49 int mp_bus_id_to_node [MAX_MP_BUSSES];
50 int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
51 static int mp_current_pci_id;
53 /* I/O APIC entries */
54 struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
56 /* # of MP IRQ source entries */
57 struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
59 /* MP IRQ source entries */
60 int mp_irq_entries;
62 int nr_ioapics;
64 int pic_mode;
65 unsigned long mp_lapic_addr;
67 unsigned int def_to_bigsmp = 0;
69 /* Processor that is doing the boot up */
70 unsigned int boot_cpu_physical_apicid = -1U;
71 /* Internal processor count */
72 unsigned int num_processors;
74 unsigned disabled_cpus __cpuinitdata;
76 /* Bitmask of physically existing CPUs */
77 physid_mask_t phys_cpu_present_map;
79 u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
82 * Intel MP BIOS table parsing routines:
87 * Checksum an MP configuration block.
90 static int __init mpf_checksum(unsigned char *mp, int len)
92 int sum = 0;
94 while (len--)
95 sum += *mp++;
97 return sum & 0xFF;
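/*
 * Illustrative sketch, not part of the original file: every MP structure
 * handled below (the floating pointer, the configuration table and the
 * OEM table) is accepted only when all of its bytes sum to zero modulo
 * 256, i.e. when mpf_checksum() returns 0. A self-contained version of
 * the same check, with a hypothetical name:
 */
#if 0
static int mp_block_valid(const unsigned char *mp, int len)
{
	unsigned int sum = 0;

	while (len--)
		sum += *mp++;		/* the checksum byte is included */

	return (sum & 0xFF) == 0;	/* valid iff the low byte is zero */
}
#endif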
101 * Have to match translation table entries to main table entries by counter
102 * hence the mpc_record variable .... can't see a less disgusting way of
103 * doing this ....
106 static int mpc_record;
107 static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinitdata;
109 static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
111 int ver, apicid;
112 physid_mask_t phys_cpu;
114 if (!(m->mpc_cpuflag & CPU_ENABLED)) {
115 disabled_cpus++;
116 return;
119 apicid = mpc_apic_id(m, translation_table[mpc_record]);
121 if (m->mpc_featureflag&(1<<0))
122 Dprintk(" Floating point unit present.\n");
123 if (m->mpc_featureflag&(1<<7))
124 Dprintk(" Machine Exception supported.\n");
125 if (m->mpc_featureflag&(1<<8))
126 Dprintk(" 64 bit compare & exchange supported.\n");
127 if (m->mpc_featureflag&(1<<9))
128 Dprintk(" Internal APIC present.\n");
129 if (m->mpc_featureflag&(1<<11))
130 Dprintk(" SEP present.\n");
131 if (m->mpc_featureflag&(1<<12))
132 Dprintk(" MTRR present.\n");
133 if (m->mpc_featureflag&(1<<13))
134 Dprintk(" PGE present.\n");
135 if (m->mpc_featureflag&(1<<14))
136 Dprintk(" MCA present.\n");
137 if (m->mpc_featureflag&(1<<15))
138 Dprintk(" CMOV present.\n");
139 if (m->mpc_featureflag&(1<<16))
140 Dprintk(" PAT present.\n");
141 if (m->mpc_featureflag&(1<<17))
142 Dprintk(" PSE present.\n");
143 if (m->mpc_featureflag&(1<<18))
144 Dprintk(" PSN present.\n");
145 if (m->mpc_featureflag&(1<<19))
146 Dprintk(" Cache Line Flush Instruction present.\n");
147 /* 20 Reserved */
148 if (m->mpc_featureflag&(1<<21))
149 Dprintk(" Debug Trace and EMON Store present.\n");
150 if (m->mpc_featureflag&(1<<22))
151 Dprintk(" ACPI Thermal Throttle Registers present.\n");
152 if (m->mpc_featureflag&(1<<23))
153 Dprintk(" MMX present.\n");
154 if (m->mpc_featureflag&(1<<24))
155 Dprintk(" FXSR present.\n");
156 if (m->mpc_featureflag&(1<<25))
157 Dprintk(" XMM present.\n");
158 if (m->mpc_featureflag&(1<<26))
159 Dprintk(" Willamette New Instructions present.\n");
160 if (m->mpc_featureflag&(1<<27))
161 Dprintk(" Self Snoop present.\n");
162 if (m->mpc_featureflag&(1<<28))
163 Dprintk(" HT present.\n");
164 if (m->mpc_featureflag&(1<<29))
165 Dprintk(" Thermal Monitor present.\n");
166 /* 30, 31 Reserved */
169 if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
170 Dprintk(" Bootup CPU\n");
171 boot_cpu_physical_apicid = m->mpc_apicid;
174 ver = m->mpc_apicver;
177 * Validate version
179 if (ver == 0x0) {
180 printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
181 "fixing up to 0x10. (tell your hw vendor)\n",
182 m->mpc_apicid);
183 ver = 0x10;
185 apic_version[m->mpc_apicid] = ver;
187 phys_cpu = apicid_to_cpu_present(apicid);
188 physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
190 if (num_processors >= NR_CPUS) {
191 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
192 " Processor ignored.\n", NR_CPUS);
193 return;
196 if (num_processors >= maxcpus) {
197 printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
198 " Processor ignored.\n", maxcpus);
199 return;
202 cpu_set(num_processors, cpu_possible_map);
203 num_processors++;
206 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
207 * but we need to work other dependencies like SMP_SUSPEND etc
208 * before this can be done without some confusion.
209 * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
210 * - Ashok Raj <ashok.raj@intel.com>
212 if (num_processors > 8) {
213 switch (boot_cpu_data.x86_vendor) {
214 case X86_VENDOR_INTEL:
215 if (!APIC_XAPIC(ver)) {
216 def_to_bigsmp = 0;
217 break;
219 /* If P4 and above fall through */
220 case X86_VENDOR_AMD:
221 def_to_bigsmp = 1;
224 bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
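/*
 * Illustrative sketch, not part of the original file: the if-chain of
 * Dprintk()s above decodes mpc_featureflag bit by bit (the bits mirror
 * the CPUID feature flags the BIOS reported for this CPU). The same
 * decode written table-driven, with hypothetical names and only a few
 * of the bits filled in:
 */
#if 0
static const char *const mpc_feature_name[32] = {
	[0]  = "Floating point unit",
	[7]  = "Machine Exception",
	[8]  = "64 bit compare & exchange",
	[9]  = "Internal APIC",
	[23] = "MMX",
	[28] = "HT",
	/* ... remaining bits exactly as in the if-chain above ... */
};

static void mpc_dump_features(unsigned long featureflag)
{
	int bit;

	for (bit = 0; bit < 32; bit++)
		if ((featureflag & (1UL << bit)) && mpc_feature_name[bit])
			Dprintk(" %s present.\n", mpc_feature_name[bit]);
}
#endif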
227 static void __init MP_bus_info (struct mpc_config_bus *m)
229 char str[7];
231 memcpy(str, m->mpc_bustype, 6);
232 str[6] = 0;
234 mpc_oem_bus_info(m, str, translation_table[mpc_record]);
236 #if MAX_MP_BUSSES < 256
237 if (m->mpc_busid >= MAX_MP_BUSSES) {
238 printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
239 " is too large, max. supported is %d\n",
240 m->mpc_busid, str, MAX_MP_BUSSES - 1);
241 return;
243 #endif
245 set_bit(m->mpc_busid, mp_bus_not_pci);
246 if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
247 mpc_oem_pci_bus(m, translation_table[mpc_record]);
248 clear_bit(m->mpc_busid, mp_bus_not_pci);
249 mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
250 mp_current_pci_id++;
251 #if defined(CONFIG_EISA) || defined (CONFIG_MCA)
252 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
253 } else if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
254 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
255 } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
256 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
257 } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
258 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
259 } else {
260 printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
261 #endif
265 static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
267 if (!(m->mpc_flags & MPC_APIC_USABLE))
268 return;
270 printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
271 m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
272 if (nr_ioapics >= MAX_IO_APICS) {
273 printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
274 MAX_IO_APICS, nr_ioapics);
275 panic("Recompile kernel with bigger MAX_IO_APICS!\n");
277 if (!m->mpc_apicaddr) {
278 printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
279 " found in MP table, skipping!\n");
280 return;
282 mp_ioapics[nr_ioapics] = *m;
283 nr_ioapics++;
286 static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
288 mp_irqs [mp_irq_entries] = *m;
289 Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
290 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
291 m->mpc_irqtype, m->mpc_irqflag & 3,
292 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
293 m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
294 if (++mp_irq_entries == MAX_IRQ_SOURCES)
295 panic("Max # of irq sources exceeded!!\n");
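/*
 * Illustrative note, not part of the original file: mpc_irqflag packs
 * the MP-spec polarity into bits 0-1 and the trigger mode into bits
 * 2-3, which is why the Dprintk() above prints "irqflag & 3" and
 * "(irqflag >> 2) & 3". E.g. the value 13 (binary 1101) used by the
 * ELCR fallback further down decodes to polarity 1 (active high) and
 * trigger 3 (level sensitive).
 */
#if 0
static void mp_decode_irqflag(unsigned short irqflag)
{
	unsigned int polarity = irqflag & 3;		/* 0=conforming, 1=high, 3=low */
	unsigned int trigger = (irqflag >> 2) & 3;	/* 0=conforming, 1=edge, 3=level */

	Dprintk("pol %u, trig %u\n", polarity, trigger);
}
#endif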
298 static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
300 Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
301 " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
302 m->mpc_irqtype, m->mpc_irqflag & 3,
303 (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
304 m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
307 #ifdef CONFIG_X86_NUMAQ
308 static void __init MP_translation_info (struct mpc_config_translation *m)
310 printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
312 if (mpc_record >= MAX_MPC_ENTRY)
313 printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
314 else
315 translation_table[mpc_record] = m; /* stash this for later */
316 if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
317 node_set_online(m->trans_quad);
321 * Read/parse the MPC oem tables
324 static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
325 unsigned short oemsize)
327 int count = sizeof (*oemtable); /* the header size */
328 unsigned char *oemptr = ((unsigned char *)oemtable)+count;
330 mpc_record = 0;
331 printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
332 if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
334 printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
335 oemtable->oem_signature[0],
336 oemtable->oem_signature[1],
337 oemtable->oem_signature[2],
338 oemtable->oem_signature[3]);
339 return;
341 if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
343 printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
344 return;
346 while (count < oemtable->oem_length) {
347 switch (*oemptr) {
348 case MP_TRANSLATION:
350 struct mpc_config_translation *m=
351 (struct mpc_config_translation *)oemptr;
352 MP_translation_info(m);
353 oemptr += sizeof(*m);
354 count += sizeof(*m);
355 ++mpc_record;
356 break;
358 default:
360 printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
361 return;
367 static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
368 char *productid)
370 if (strncmp(oem, "IBM NUMA", 8))
371 printk("Warning! May not be a NUMA-Q system!\n");
372 if (mpc->mpc_oemptr)
373 smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
374 mpc->mpc_oemsize);
376 #endif /* CONFIG_X86_NUMAQ */
379 * Read/parse the MPC
382 static int __init smp_read_mpc(struct mp_config_table *mpc)
384 char str[16];
385 char oem[10];
386 int count=sizeof(*mpc);
387 unsigned char *mpt=((unsigned char *)mpc)+count;
389 if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
390 printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
391 *(u32 *)mpc->mpc_signature);
392 return 0;
394 if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
395 printk(KERN_ERR "SMP mptable: checksum error!\n");
396 return 0;
398 if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
399 printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
400 mpc->mpc_spec);
401 return 0;
403 if (!mpc->mpc_lapic) {
404 printk(KERN_ERR "SMP mptable: null local APIC address!\n");
405 return 0;
407 memcpy(oem,mpc->mpc_oem,8);
408 oem[8]=0;
409 printk(KERN_INFO "OEM ID: %s ",oem);
411 memcpy(str,mpc->mpc_productid,12);
412 str[12]=0;
413 printk("Product ID: %s ",str);
415 mps_oem_check(mpc, oem, str);
417 printk("APIC at: 0x%X\n", mpc->mpc_lapic);
420 * Save the local APIC address (it might be non-default) -- but only
421 * if we're not using ACPI.
423 if (!acpi_lapic)
424 mp_lapic_addr = mpc->mpc_lapic;
427 * Now process the configuration blocks.
429 mpc_record = 0;
430 while (count < mpc->mpc_length) {
431 switch(*mpt) {
432 case MP_PROCESSOR:
434 struct mpc_config_processor *m=
435 (struct mpc_config_processor *)mpt;
436 /* ACPI may have already provided this data */
437 if (!acpi_lapic)
438 MP_processor_info(m);
439 mpt += sizeof(*m);
440 count += sizeof(*m);
441 break;
443 case MP_BUS:
445 struct mpc_config_bus *m=
446 (struct mpc_config_bus *)mpt;
447 MP_bus_info(m);
448 mpt += sizeof(*m);
449 count += sizeof(*m);
450 break;
452 case MP_IOAPIC:
454 struct mpc_config_ioapic *m=
455 (struct mpc_config_ioapic *)mpt;
456 MP_ioapic_info(m);
457 mpt+=sizeof(*m);
458 count+=sizeof(*m);
459 break;
461 case MP_INTSRC:
463 struct mpc_config_intsrc *m=
464 (struct mpc_config_intsrc *)mpt;
466 MP_intsrc_info(m);
467 mpt+=sizeof(*m);
468 count+=sizeof(*m);
469 break;
471 case MP_LINTSRC:
473 struct mpc_config_lintsrc *m=
474 (struct mpc_config_lintsrc *)mpt;
475 MP_lintsrc_info(m);
476 mpt+=sizeof(*m);
477 count+=sizeof(*m);
478 break;
480 default:
482 count = mpc->mpc_length;
483 break;
486 ++mpc_record;
488 setup_apic_routing();
489 if (!num_processors)
490 printk(KERN_ERR "SMP mptable: no processors registered!\n");
491 return num_processors;
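/*
 * Illustrative sketch, not part of the original file: the loop above
 * walks mpc_length bytes of entries that follow the table header; the
 * first byte of every entry is its type, and each type handled here has
 * a fixed size (20 bytes for a processor entry, 8 bytes for the rest),
 * so the parser simply advances by sizeof(*m). The same stepping with a
 * hypothetical size table:
 */
#if 0
static const unsigned char mpc_entry_size[] = {
	[MP_PROCESSOR] = sizeof(struct mpc_config_processor),	/* 20 */
	[MP_BUS]       = sizeof(struct mpc_config_bus),		/*  8 */
	[MP_IOAPIC]    = sizeof(struct mpc_config_ioapic),	/*  8 */
	[MP_INTSRC]    = sizeof(struct mpc_config_intsrc),	/*  8 */
	[MP_LINTSRC]   = sizeof(struct mpc_config_lintsrc),	/*  8 */
};

/* step to the next entry, or return NULL for an unknown type */
static unsigned char *mpc_next_entry(unsigned char *mpt)
{
	if (*mpt >= ARRAY_SIZE(mpc_entry_size) || !mpc_entry_size[*mpt])
		return NULL;
	return mpt + mpc_entry_size[*mpt];
}
#endif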
494 static int __init ELCR_trigger(unsigned int irq)
496 unsigned int port;
498 port = 0x4d0 + (irq >> 3);
499 return (inb(port) >> (irq & 7)) & 1;
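/*
 * Illustrative note, not part of the original file: the ELCR is a pair
 * of 8-bit registers at I/O ports 0x4d0 (IRQs 0-7) and 0x4d1 (IRQs
 * 8-15); a set bit marks the IRQ as level triggered. E.g. for IRQ 9:
 * port = 0x4d0 + (9 >> 3) = 0x4d1 and bit = 9 & 7 = 1.
 */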
502 static void __init construct_default_ioirq_mptable(int mpc_default_type)
504 struct mpc_config_intsrc intsrc;
505 int i;
506 int ELCR_fallback = 0;
508 intsrc.mpc_type = MP_INTSRC;
509 intsrc.mpc_irqflag = 0; /* conforming */
510 intsrc.mpc_srcbus = 0;
511 intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
513 intsrc.mpc_irqtype = mp_INT;
516 * If true, we have an ISA/PCI system with no IRQ entries
517 * in the MP table. To prevent the PCI interrupts from being set up
518 * incorrectly, we try to use the ELCR. The sanity check to see if
519 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
520 * never be level sensitive, so we simply see if the ELCR agrees.
521 * If it does, we assume it's valid.
523 if (mpc_default_type == 5) {
524 printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
526 if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
527 printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
528 else {
529 printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
530 ELCR_fallback = 1;
534 for (i = 0; i < 16; i++) {
535 switch (mpc_default_type) {
536 case 2:
537 if (i == 0 || i == 13)
538 continue; /* IRQ0 & IRQ13 not connected */
539 /* fall through */
540 default:
541 if (i == 2)
542 continue; /* IRQ2 is never connected */
545 if (ELCR_fallback) {
547 * If the ELCR indicates a level-sensitive interrupt, we
548 * copy that information over to the MP table in the
549 * irqflag field (level sensitive, active high polarity).
551 if (ELCR_trigger(i))
552 intsrc.mpc_irqflag = 13;
553 else
554 intsrc.mpc_irqflag = 0;
557 intsrc.mpc_srcbusirq = i;
558 intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
559 MP_intsrc_info(&intsrc);
562 intsrc.mpc_irqtype = mp_ExtINT;
563 intsrc.mpc_srcbusirq = 0;
564 intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
565 MP_intsrc_info(&intsrc);
568 static inline void __init construct_default_ISA_mptable(int mpc_default_type)
570 struct mpc_config_processor processor;
571 struct mpc_config_bus bus;
572 struct mpc_config_ioapic ioapic;
573 struct mpc_config_lintsrc lintsrc;
574 int linttypes[2] = { mp_ExtINT, mp_NMI };
575 int i;
578 * local APIC has default address
580 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
583 * 2 CPUs, numbered 0 & 1.
585 processor.mpc_type = MP_PROCESSOR;
586 /* Either an integrated APIC or a discrete 82489DX. */
587 processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
588 processor.mpc_cpuflag = CPU_ENABLED;
589 processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
590 (boot_cpu_data.x86_model << 4) |
591 boot_cpu_data.x86_mask;
592 processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
593 processor.mpc_reserved[0] = 0;
594 processor.mpc_reserved[1] = 0;
595 for (i = 0; i < 2; i++) {
596 processor.mpc_apicid = i;
597 MP_processor_info(&processor);
600 bus.mpc_type = MP_BUS;
601 bus.mpc_busid = 0;
602 switch (mpc_default_type) {
603 default:
604 printk("???\n");
605 printk(KERN_ERR "Unknown standard configuration %d\n",
606 mpc_default_type);
607 /* fall through */
608 case 1:
609 case 5:
610 memcpy(bus.mpc_bustype, "ISA ", 6);
611 break;
612 case 2:
613 case 6:
614 case 3:
615 memcpy(bus.mpc_bustype, "EISA ", 6);
616 break;
617 case 4:
618 case 7:
619 memcpy(bus.mpc_bustype, "MCA ", 6);
621 MP_bus_info(&bus);
622 if (mpc_default_type > 4) {
623 bus.mpc_busid = 1;
624 memcpy(bus.mpc_bustype, "PCI ", 6);
625 MP_bus_info(&bus);
628 ioapic.mpc_type = MP_IOAPIC;
629 ioapic.mpc_apicid = 2;
630 ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
631 ioapic.mpc_flags = MPC_APIC_USABLE;
632 ioapic.mpc_apicaddr = 0xFEC00000;
633 MP_ioapic_info(&ioapic);
636 * We set up most of the low 16 IO-APIC pins according to MPS rules.
638 construct_default_ioirq_mptable(mpc_default_type);
640 lintsrc.mpc_type = MP_LINTSRC;
641 lintsrc.mpc_irqflag = 0; /* conforming */
642 lintsrc.mpc_srcbusid = 0;
643 lintsrc.mpc_srcbusirq = 0;
644 lintsrc.mpc_destapic = MP_APIC_ALL;
645 for (i = 0; i < 2; i++) {
646 lintsrc.mpc_irqtype = linttypes[i];
647 lintsrc.mpc_destapiclint = i;
648 MP_lintsrc_info(&lintsrc);
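/*
 * Illustrative note, not part of the original file: the code above
 * implements the MP-spec "default configurations" selected by
 * mpf_feature1: types 1 and 5 use an ISA bus, types 2, 3 and 6 use
 * EISA, types 4 and 7 use MCA, and any type above 4 additionally gets
 * a PCI bus as bus #1 and an integrated local APIC (version 0x10)
 * instead of a discrete 82489DX (version 0x01).
 */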
652 static struct intel_mp_floating *mpf_found;
655 * Scan the memory blocks for an SMP configuration block.
657 void __init get_smp_config (void)
659 struct intel_mp_floating *mpf = mpf_found;
662 * ACPI supports both logical (e.g. Hyper-Threading) and physical
663 * processors, where MPS only supports physical.
665 if (acpi_lapic && acpi_ioapic) {
666 printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
667 return;
669 else if (acpi_lapic)
670 printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
672 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
673 if (mpf->mpf_feature2 & (1<<7)) {
674 printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
675 pic_mode = 1;
676 } else {
677 printk(KERN_INFO " Virtual Wire compatibility mode.\n");
678 pic_mode = 0;
682 * Now see if we need to read further.
684 if (mpf->mpf_feature1 != 0) {
686 printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
687 construct_default_ISA_mptable(mpf->mpf_feature1);
689 } else if (mpf->mpf_physptr) {
692 * Read the physical hardware table. Anything here will
693 * override the defaults.
695 if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
696 smp_found_config = 0;
697 printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
698 printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
699 return;
702 * If there are no explicit MP IRQ entries, then we are
703 * broken. We set up most of the low 16 IO-APIC pins to
704 * ISA defaults and hope it will work.
706 if (!mp_irq_entries) {
707 struct mpc_config_bus bus;
709 printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
711 bus.mpc_type = MP_BUS;
712 bus.mpc_busid = 0;
713 memcpy(bus.mpc_bustype, "ISA ", 6);
714 MP_bus_info(&bus);
716 construct_default_ioirq_mptable(0);
719 } else
720 BUG();
722 printk(KERN_INFO "Processors: %d\n", num_processors);
724 * Only use the first configuration found.
728 static int __init smp_scan_config (unsigned long base, unsigned long length)
730 unsigned long *bp = phys_to_virt(base);
731 struct intel_mp_floating *mpf;
733 printk(KERN_INFO "Scan SMP from %p for %ld bytes.\n", bp,length);
734 if (sizeof(*mpf) != 16)
735 printk("Error: MPF size\n");
737 while (length > 0) {
738 mpf = (struct intel_mp_floating *)bp;
739 if ((*bp == SMP_MAGIC_IDENT) &&
740 (mpf->mpf_length == 1) &&
741 !mpf_checksum((unsigned char *)bp, 16) &&
742 ((mpf->mpf_specification == 1)
743 || (mpf->mpf_specification == 4)) ) {
745 smp_found_config = 1;
746 printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n",
747 mpf, virt_to_phys(mpf));
748 reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE,
749 BOOTMEM_DEFAULT);
750 if (mpf->mpf_physptr) {
752 * We cannot access the MPC table to compute
753 * its size yet, as only a few megabytes from
754 * the bottom of memory are mapped now.
755 * The PC-9800's MPC table is placed at the very
756 * end of physical memory, so simply reserving
757 * PAGE_SIZE from mpf->mpf_physptr would trigger
758 * a BUG() in reserve_bootmem.
760 unsigned long size = PAGE_SIZE;
761 unsigned long end = max_low_pfn * PAGE_SIZE;
762 if (mpf->mpf_physptr + size > end)
763 size = end - mpf->mpf_physptr;
764 reserve_bootmem(mpf->mpf_physptr, size,
765 BOOTMEM_DEFAULT);
768 mpf_found = mpf;
769 return 1;
771 bp += 4;
772 length -= 16;
774 return 0;
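/*
 * Illustrative sketch, not part of the original file: the MP floating
 * pointer structure is 16 bytes long and starts on a 16-byte boundary,
 * which is why the loop above advances "bp += 4" (bp is an unsigned
 * long *, so 4 * 4 = 16 bytes on 32-bit x86) and subtracts 16 from
 * length. SMP_MAGIC_IDENT is the ASCII signature "_MP_" read as a
 * 32-bit word. A reduced, self-contained version of the match (it
 * checks only the signature and checksum, while the loop above also
 * checks mpf_length and mpf_specification):
 */
#if 0
static int looks_like_mpf(const unsigned char *p)
{
	unsigned int i, sum = 0;

	if (memcmp(p, "_MP_", 4))
		return 0;		/* wrong signature */
	for (i = 0; i < 16; i++)
		sum += p[i];
	return (sum & 0xFF) == 0;	/* all 16 bytes must sum to 0 mod 256 */
}
#endif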
777 void __init find_smp_config (void)
779 unsigned int address;
782 * FIXME: Linux assumes you have 640K of base ram..
783 * this continues the error...
785 * 1) Scan the bottom 1K for a signature
786 * 2) Scan the top 1K of base RAM
787 * 3) Scan the 64K of bios
789 if (smp_scan_config(0x0,0x400) ||
790 smp_scan_config(639*0x400,0x400) ||
791 smp_scan_config(0xF0000,0x10000))
792 return;
794 * If it is an SMP machine we should know now, unless the
795 * configuration is in an EISA/MCA bus machine with an
796 * extended bios data area.
798 * there is a real-mode segmented pointer pointing to the
799 * 4K EBDA area at 0x40E, calculate and scan it here.
801 * NOTE! There are Linux loaders that will corrupt the EBDA
802 * area, and as such this kind of SMP config may be less
803 * trustworthy, simply because the SMP table may have been
804 * stomped on during early boot. These loaders are buggy and
805 * should be fixed.
807 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
810 address = get_bios_ebda();
811 if (address)
812 smp_scan_config(address, 0x400);
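/*
 * Illustrative sketch, not part of the original file: get_bios_ebda()
 * (from <bios_ebda.h>) converts the real-mode segment word the BIOS
 * stores at physical address 0x40E into a linear address by shifting it
 * left by four; a segment value of 0x9FC0, for instance, puts the EBDA
 * at 0x9FC00. Roughly:
 */
#if 0
static unsigned int ebda_address(void)
{
	unsigned int segment = *(unsigned short *)phys_to_virt(0x40E);

	return segment << 4;	/* real-mode segment -> physical address */
}
#endif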
815 int es7000_plat;
817 /* --------------------------------------------------------------------------
818 ACPI-based MP Configuration
819 -------------------------------------------------------------------------- */
821 #ifdef CONFIG_ACPI
823 void __init mp_register_lapic_address(u64 address)
825 mp_lapic_addr = (unsigned long) address;
827 set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
829 if (boot_cpu_physical_apicid == -1U)
830 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
832 Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
835 void __cpuinit mp_register_lapic (u8 id, u8 enabled)
837 struct mpc_config_processor processor;
838 int boot_cpu = 0;
840 if (MAX_APICS - id <= 0) {
841 printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
842 id, MAX_APICS);
843 return;
846 if (id == boot_cpu_physical_apicid)
847 boot_cpu = 1;
849 processor.mpc_type = MP_PROCESSOR;
850 processor.mpc_apicid = id;
851 processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
852 processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
853 processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
854 processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
855 (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
856 processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
857 processor.mpc_reserved[0] = 0;
858 processor.mpc_reserved[1] = 0;
860 MP_processor_info(&processor);
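/*
 * Illustrative note, not part of the original file: mpc_cpufeature
 * packs the CPU signature as (family << 8) | (model << 4) | stepping,
 * the same encoding used by construct_default_ISA_mptable() above.
 * E.g. family 6, model 8, stepping 3 gives 0x683.
 */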
863 #ifdef CONFIG_X86_IO_APIC
865 #define MP_ISA_BUS 0
866 #define MP_MAX_IOAPIC_PIN 127
868 static struct mp_ioapic_routing {
869 int apic_id;
870 int gsi_base;
871 int gsi_end;
872 u32 pin_programmed[4];
873 } mp_ioapic_routing[MAX_IO_APICS];
875 static int mp_find_ioapic (int gsi)
877 int i = 0;
879 /* Find the IOAPIC that manages this GSI. */
880 for (i = 0; i < nr_ioapics; i++) {
881 if ((gsi >= mp_ioapic_routing[i].gsi_base)
882 && (gsi <= mp_ioapic_routing[i].gsi_end))
883 return i;
886 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
888 return -1;
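/*
 * Illustrative note, not part of the original file: each entry of
 * mp_ioapic_routing[] covers the inclusive GSI range [gsi_base,
 * gsi_end]. With two I/O APICs covering, say, GSIs 0-23 and 24-47
 * (hypothetical numbers), mp_find_ioapic(30) returns 1 and the pin
 * inside that APIC is 30 - mp_ioapic_routing[1].gsi_base = 6, which is
 * exactly the conversion mp_override_legacy_irq() and mp_register_gsi()
 * below perform.
 */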
891 void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
893 int idx = 0;
894 int tmpid;
896 if (nr_ioapics >= MAX_IO_APICS) {
897 printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
898 "(found %d)\n", MAX_IO_APICS, nr_ioapics);
899 panic("Recompile kernel with bigger MAX_IO_APICS!\n");
901 if (!address) {
902 printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
903 " found in MADT table, skipping!\n");
904 return;
907 idx = nr_ioapics++;
909 mp_ioapics[idx].mpc_type = MP_IOAPIC;
910 mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
911 mp_ioapics[idx].mpc_apicaddr = address;
913 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
914 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
915 && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
916 tmpid = io_apic_get_unique_id(idx, id);
917 else
918 tmpid = id;
919 if (tmpid == -1) {
920 nr_ioapics--;
921 return;
923 mp_ioapics[idx].mpc_apicid = tmpid;
924 mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
927 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
928 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
930 mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
931 mp_ioapic_routing[idx].gsi_base = gsi_base;
932 mp_ioapic_routing[idx].gsi_end = gsi_base +
933 io_apic_get_redir_entries(idx);
935 printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
936 "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
937 mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
938 mp_ioapic_routing[idx].gsi_base,
939 mp_ioapic_routing[idx].gsi_end);
942 void __init
943 mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
945 struct mpc_config_intsrc intsrc;
946 int ioapic = -1;
947 int pin = -1;
950 * Convert 'gsi' to 'ioapic.pin'.
952 ioapic = mp_find_ioapic(gsi);
953 if (ioapic < 0)
954 return;
955 pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
958 * TBD: This check is for faulty timer entries, where the override
959 * erroneously sets the trigger to level, resulting in a HUGE
960 * increase of timer interrupts!
962 if ((bus_irq == 0) && (trigger == 3))
963 trigger = 1;
965 intsrc.mpc_type = MP_INTSRC;
966 intsrc.mpc_irqtype = mp_INT;
967 intsrc.mpc_irqflag = (trigger << 2) | polarity;
968 intsrc.mpc_srcbus = MP_ISA_BUS;
969 intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
970 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
971 intsrc.mpc_dstirq = pin; /* INTIN# */
973 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
974 intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
975 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
976 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
978 mp_irqs[mp_irq_entries] = intsrc;
979 if (++mp_irq_entries == MAX_IRQ_SOURCES)
980 panic("Max # of irq sources exceeded!\n");
983 void __init mp_config_acpi_legacy_irqs (void)
985 struct mpc_config_intsrc intsrc;
986 int i = 0;
987 int ioapic = -1;
989 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
991 * Fabricate the legacy ISA bus (bus #31).
993 mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
994 #endif
995 set_bit(MP_ISA_BUS, mp_bus_not_pci);
996 Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
999 * Older generations of ES7000 have no legacy identity mappings
1001 if (es7000_plat == 1)
1002 return;
1005 * Locate the IOAPIC that manages the ISA IRQs (0-15).
1007 ioapic = mp_find_ioapic(0);
1008 if (ioapic < 0)
1009 return;
1011 intsrc.mpc_type = MP_INTSRC;
1012 intsrc.mpc_irqflag = 0; /* Conforming */
1013 intsrc.mpc_srcbus = MP_ISA_BUS;
1014 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
1017 * Use the default configuration for the IRQs 0-15. Unless
1018 * overridden by (MADT) interrupt source override entries.
1020 for (i = 0; i < 16; i++) {
1021 int idx;
1023 for (idx = 0; idx < mp_irq_entries; idx++) {
1024 struct mpc_config_intsrc *irq = mp_irqs + idx;
1026 /* Do we already have a mapping for this ISA IRQ? */
1027 if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
1028 break;
1030 /* Do we already have a mapping for this IOAPIC pin */
1031 if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
1032 (irq->mpc_dstirq == i))
1033 break;
1036 if (idx != mp_irq_entries) {
1037 printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
1038 continue; /* IRQ already used */
1041 intsrc.mpc_irqtype = mp_INT;
1042 intsrc.mpc_srcbusirq = i; /* Identity mapped */
1043 intsrc.mpc_dstirq = i;
1045 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
1046 "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
1047 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
1048 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
1049 intsrc.mpc_dstirq);
1051 mp_irqs[mp_irq_entries] = intsrc;
1052 if (++mp_irq_entries == MAX_IRQ_SOURCES)
1053 panic("Max # of irq sources exceeded!\n");
1057 #define MAX_GSI_NUM 4096
1058 #define IRQ_COMPRESSION_START 64
1060 int mp_register_gsi(u32 gsi, int triggering, int polarity)
1062 int ioapic = -1;
1063 int ioapic_pin = 0;
1064 int idx, bit = 0;
1065 static int pci_irq = IRQ_COMPRESSION_START;
1067 * Mapping between Global System Interrupts, which
1068 * represent all possible interrupts, and IRQs
1069 * assigned to actual devices.
1071 static int gsi_to_irq[MAX_GSI_NUM];
1073 /* Don't set up the ACPI SCI because it's already set up */
1074 if (acpi_gbl_FADT.sci_interrupt == gsi)
1075 return gsi;
1077 ioapic = mp_find_ioapic(gsi);
1078 if (ioapic < 0) {
1079 printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
1080 return gsi;
1083 ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
1085 if (ioapic_renumber_irq)
1086 gsi = ioapic_renumber_irq(ioapic, gsi);
1089 * Avoid pin reprogramming. PRTs typically include entries
1090 * with redundant pin->gsi mappings (but unique PCI devices);
1091 * we only program the IOAPIC on the first.
1093 bit = ioapic_pin % 32;
1094 idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
1095 if (idx > 3) {
1096 printk(KERN_ERR "Invalid reference to IOAPIC pin "
1097 "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
1098 ioapic_pin);
1099 return gsi;
1101 if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
1102 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
1103 mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
1104 return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
1107 mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
1110 * For GSI >= 64, use IRQ compression
1112 if ((gsi >= IRQ_COMPRESSION_START)
1113 && (triggering == ACPI_LEVEL_SENSITIVE)) {
1115 * For PCI devices assign IRQs in order, avoiding gaps
1116 * due to unused I/O APIC pins.
1118 int irq = gsi;
1119 if (gsi < MAX_GSI_NUM) {
1121 * Retain the VIA chipset work-around (gsi > 15), but
1122 * avoid a problem where the 8254 timer (IRQ0) is setup
1123 * via an override (so it's not on pin 0 of the ioapic),
1124 * and at the same time, the pin 0 interrupt is a PCI
1125 * type. The gsi > 15 test could cause these two pins
1126 * to be shared as IRQ0, and they are not shareable.
1127 * So test for this condition, and if necessary, avoid
1128 * the pin collision.
1130 if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
1131 gsi = pci_irq++;
1133 * Don't assign IRQ used by ACPI SCI
1135 if (gsi == acpi_gbl_FADT.sci_interrupt)
1136 gsi = pci_irq++;
1137 gsi_to_irq[irq] = gsi;
1138 } else {
1139 printk(KERN_ERR "GSI %u is too high\n", gsi);
1140 return gsi;
1144 io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
1145 triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
1146 polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
1147 return gsi;
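/*
 * Illustrative sketch, not part of the original file: pin_programmed[4]
 * is a 128-bit bitmap (hence MP_MAX_IOAPIC_PIN == 127); a pin maps to
 * word pin / 32 and bit pin % 32, which is what the idx/bit arithmetic
 * above computes. A minimal test-and-set helper with hypothetical
 * names:
 */
#if 0
static int pin_already_programmed(struct mp_ioapic_routing *r, int pin)
{
	int idx = pin / 32;
	u32 bit = 1U << (pin % 32);

	if (r->pin_programmed[idx] & bit)
		return 1;		/* already routed once, skip it */
	r->pin_programmed[idx] |= bit;	/* remember that we programmed it */
	return 0;
}
#endif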
1150 #endif /* CONFIG_X86_IO_APIC */
1151 #endif /* CONFIG_ACPI */