arch/x86/kernel/mpparse_32.c
/*
 *	Intel Multiprocessor Specification 1.1 and 1.4
 *	compliant MP-table parsing routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *		Alan Cox	:	Added EBDA scanning
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Maciej W. Rozycki:	Bits for default MP configurations
 *		Paul Diefenbaugh:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>

#include <asm/smp.h>
#include <asm/acpi.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/io_apic.h>
#include <asm/bios_ebda.h>

#include <mach_apic.h>
#include <mach_apicdef.h>
#include <mach_mpparse.h>

/* Have we found an MP table */
int smp_found_config;

/*
 * Various Linux-internal data structures created from the
 * MP-table.
 */
#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type [MAX_MP_BUSSES];
#endif
DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
static int mp_current_pci_id;

/* MP IRQ source entries */
struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

int pic_mode;

/* Make it easy to share the UP and SMP code: */
#ifndef CONFIG_X86_SMP
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
#ifndef CONFIG_X86_LOCAL_APIC
unsigned int boot_cpu_physical_apicid = -1U;
#endif
#endif

/*
 * Intel MP BIOS table parsing routines:
 */

/*
 * Checksum an MP configuration block.
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}
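
/*
 * The MP spec arranges every structure so that all of its bytes, including
 * the checksum byte itself, sum to zero modulo 256; a valid block therefore
 * makes mpf_checksum() return 0.  A table writer would pick the byte as,
 * roughly (illustrative only, not code from this file):
 *
 *	mpc->mpc_checksum = 0;
 *	mpc->mpc_checksum = -mpf_checksum((unsigned char *)mpc, mpc->mpc_length);
 */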

#ifdef CONFIG_X86_NUMAQ
/*
 * Have to match translation table entries to main table entries by counter
 * hence the mpc_record variable .... can't see a less disgusting way of
 * doing this ....
 */

static int mpc_record;
static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinitdata;
#endif

static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
{
	int apicid;

	if (!(m->mpc_cpuflag & CPU_ENABLED)) {
#ifdef CONFIG_X86_SMP
		disabled_cpus++;
#endif
		return;
	}

#ifdef CONFIG_X86_NUMAQ
	apicid = mpc_apic_id(m, translation_table[mpc_record]);
#else
	Dprintk("Processor #%d %u:%u APIC version %d\n",
		m->mpc_apicid,
		(m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
		(m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
		m->mpc_apicver);
	apicid = m->mpc_apicid;
#endif

	if (m->mpc_featureflag&(1<<0))
		Dprintk("    Floating point unit present.\n");
	if (m->mpc_featureflag&(1<<7))
		Dprintk("    Machine Exception supported.\n");
	if (m->mpc_featureflag&(1<<8))
		Dprintk("    64 bit compare & exchange supported.\n");
	if (m->mpc_featureflag&(1<<9))
		Dprintk("    Internal APIC present.\n");
	if (m->mpc_featureflag&(1<<11))
		Dprintk("    SEP present.\n");
	if (m->mpc_featureflag&(1<<12))
		Dprintk("    MTRR present.\n");
	if (m->mpc_featureflag&(1<<13))
		Dprintk("    PGE present.\n");
	if (m->mpc_featureflag&(1<<14))
		Dprintk("    MCA present.\n");
	if (m->mpc_featureflag&(1<<15))
		Dprintk("    CMOV present.\n");
	if (m->mpc_featureflag&(1<<16))
		Dprintk("    PAT present.\n");
	if (m->mpc_featureflag&(1<<17))
		Dprintk("    PSE present.\n");
	if (m->mpc_featureflag&(1<<18))
		Dprintk("    PSN present.\n");
	if (m->mpc_featureflag&(1<<19))
		Dprintk("    Cache Line Flush Instruction present.\n");
	/* 20 Reserved */
	if (m->mpc_featureflag&(1<<21))
		Dprintk("    Debug Trace and EMON Store present.\n");
	if (m->mpc_featureflag&(1<<22))
		Dprintk("    ACPI Thermal Throttle Registers present.\n");
	if (m->mpc_featureflag&(1<<23))
		Dprintk("    MMX present.\n");
	if (m->mpc_featureflag&(1<<24))
		Dprintk("    FXSR present.\n");
	if (m->mpc_featureflag&(1<<25))
		Dprintk("    XMM present.\n");
	if (m->mpc_featureflag&(1<<26))
		Dprintk("    Willamette New Instructions present.\n");
	if (m->mpc_featureflag&(1<<27))
		Dprintk("    Self Snoop present.\n");
	if (m->mpc_featureflag&(1<<28))
		Dprintk("    HT present.\n");
	if (m->mpc_featureflag&(1<<29))
		Dprintk("    Thermal Monitor present.\n");
	/* 30, 31 Reserved */

	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
		Dprintk("    Bootup CPU\n");
		boot_cpu_physical_apicid = m->mpc_apicid;
	}

	generic_processor_info(apicid, m->mpc_apicver);
}
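
/*
 * Whether a CPU arrives via the MP table above or via the ACPI path further
 * down, it is funnelled into generic_processor_info(), which records the
 * APIC ID among the present CPUs and bumps num_processors; the boot CPU is
 * the entry whose mpc_cpuflag has CPU_BOOTPROCESSOR set.
 */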

static void __init MP_bus_info (struct mpc_config_bus *m)
{
	char str[7];

	memcpy(str, m->mpc_bustype, 6);
	str[6] = 0;

#ifdef CONFIG_X86_NUMAQ
	mpc_oem_bus_info(m, str, translation_table[mpc_record]);
#else
	Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
#endif

#if MAX_MP_BUSSES < 256
	if (m->mpc_busid >= MAX_MP_BUSSES) {
		printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
		       " is too large, max. supported is %d\n",
		       m->mpc_busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	set_bit(m->mpc_busid, mp_bus_not_pci);
	if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
#ifdef CONFIG_X86_NUMAQ
		mpc_oem_pci_bus(m, translation_table[mpc_record]);
#endif
		clear_bit(m->mpc_busid, mp_bus_not_pci);
		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
		mp_current_pci_id++;
#if defined(CONFIG_EISA) || defined (CONFIG_MCA)
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
	} else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
	} else {
		printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
#endif
	}
}
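
/*
 * Per the MP specification, mpc_bustype is a six-character, blank-padded
 * ASCII string ("ISA   ", "EISA  ", "PCI   ", "MCA   ", ...), which is why
 * MP_bus_info() copies exactly six bytes and matches it with strncmp()
 * against the BUSTYPE_* constants above.
 */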

#ifdef CONFIG_X86_IO_APIC

static int bad_ioapic(unsigned long address)
{
	if (nr_ioapics >= MAX_IO_APICS) {
		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
		       "(found %d)\n", MAX_IO_APICS, nr_ioapics);
		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
	}
	if (!address) {
		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
		       " found in table, skipping!\n");
		return 1;
	}
	return 0;
}

static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
{
	if (!(m->mpc_flags & MPC_APIC_USABLE))
		return;

	printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
	       m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);

	if (bad_ioapic(m->mpc_apicaddr))
		return;

	mp_ioapics[nr_ioapics] = *m;
	nr_ioapics++;
}
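
/*
 * In an MP-table interrupt entry, mpc_irqflag packs the polarity in bits
 * 0-1 and the trigger mode in bits 2-3 (0 = conforms to the bus, 1 =
 * active high/edge, 3 = active low/level); that is what the "pol"/"trig"
 * decoding in the Dprintk() below and in MP_lintsrc_info() extracts.
 */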

static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
{
	mp_irqs [mp_irq_entries] = *m;
	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
		m->mpc_irqtype, m->mpc_irqflag & 3,
		(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
		m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!!\n");
}

#endif

static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
{
	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->mpc_irqtype, m->mpc_irqflag & 3,
		(m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid,
		m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
}

#ifdef CONFIG_X86_NUMAQ
static void __init MP_translation_info (struct mpc_config_translation *m)
{
	printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);

	if (mpc_record >= MAX_MPC_ENTRY)
		printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
	else
		translation_table[mpc_record] = m; /* stash this for later */
	if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
		node_set_online(m->trans_quad);
}

/*
 * Read/parse the MPC oem tables
 */

static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable,
				    unsigned short oemsize)
{
	int count = sizeof(*oemtable);	/* the header size */
	unsigned char *oemptr = ((unsigned char *)oemtable) + count;

	mpc_record = 0;
	printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
	if (memcmp(oemtable->oem_signature, MPC_OEM_SIGNATURE, 4))
	{
		printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
		       oemtable->oem_signature[0],
		       oemtable->oem_signature[1],
		       oemtable->oem_signature[2],
		       oemtable->oem_signature[3]);
		return;
	}
	if (mpf_checksum((unsigned char *)oemtable, oemtable->oem_length))
	{
		printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
		return;
	}
	while (count < oemtable->oem_length) {
		switch (*oemptr) {
		case MP_TRANSLATION:
		{
			struct mpc_config_translation *m =
				(struct mpc_config_translation *)oemptr;
			MP_translation_info(m);
			oemptr += sizeof(*m);
			count += sizeof(*m);
			++mpc_record;
			break;
		}
		default:
		{
			printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
			return;
		}
		}
	}
}

static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
				 char *productid)
{
	if (strncmp(oem, "IBM NUMA", 8))
		printk("Warning!  May not be a NUMA-Q system!\n");
	if (mpc->mpc_oemptr)
		smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
				 mpc->mpc_oemsize);
}
#endif	/* CONFIG_X86_NUMAQ */
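
/*
 * The MP configuration table is a header followed by variable-length
 * entries; the first byte of each entry gives its type, and the type
 * determines its size (processor entries are 20 bytes, bus, I/O APIC and
 * interrupt entries 8 bytes each), which is how the parser below walks
 * through the table.
 */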

/*
 * Read/parse the MPC
 */

static int __init smp_read_mpc(struct mp_config_table *mpc)
{
	char str[16];
	char oem[10];
	int count = sizeof(*mpc);
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) {
		printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
		       *(u32 *)mpc->mpc_signature);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) {
		printk(KERN_ERR "SMP mptable: checksum error!\n");
		return 0;
	}
	if (mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04) {
		printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
		       mpc->mpc_spec);
		return 0;
	}
	if (!mpc->mpc_lapic) {
		printk(KERN_ERR "SMP mptable: null local APIC address!\n");
		return 0;
	}
	memcpy(oem, mpc->mpc_oem, 8);
	oem[8] = 0;
	printk(KERN_INFO "OEM ID: %s ", oem);

	memcpy(str, mpc->mpc_productid, 12);
	str[12] = 0;
	printk("Product ID: %s ", str);

	mps_oem_check(mpc, oem, str);

	printk("APIC at: 0x%X\n", mpc->mpc_lapic);

	/*
	 * Save the local APIC address (it might be non-default) -- but only
	 * if we're not using ACPI.
	 */
	if (!acpi_lapic)
		mp_lapic_addr = mpc->mpc_lapic;

	/*
	 * Now process the configuration blocks.
	 */
#ifdef CONFIG_X86_NUMAQ
	mpc_record = 0;
#endif
	while (count < mpc->mpc_length) {
		switch (*mpt) {
		case MP_PROCESSOR:
		{
			struct mpc_config_processor *m =
				(struct mpc_config_processor *)mpt;
			/* ACPI may have already provided this data */
			if (!acpi_lapic)
				MP_processor_info(m);
			mpt += sizeof(*m);
			count += sizeof(*m);
			break;
		}
		case MP_BUS:
		{
			struct mpc_config_bus *m =
				(struct mpc_config_bus *)mpt;
			MP_bus_info(m);
			mpt += sizeof(*m);
			count += sizeof(*m);
			break;
		}
		case MP_IOAPIC:
		{
#ifdef CONFIG_X86_IO_APIC
			struct mpc_config_ioapic *m =
				(struct mpc_config_ioapic *)mpt;
			MP_ioapic_info(m);
#endif
			mpt += sizeof(struct mpc_config_ioapic);
			count += sizeof(struct mpc_config_ioapic);
			break;
		}
		case MP_INTSRC:
		{
#ifdef CONFIG_X86_IO_APIC
			struct mpc_config_intsrc *m =
				(struct mpc_config_intsrc *)mpt;

			MP_intsrc_info(m);
#endif
			mpt += sizeof(struct mpc_config_intsrc);
			count += sizeof(struct mpc_config_intsrc);
			break;
		}
		case MP_LINTSRC:
		{
			struct mpc_config_lintsrc *m =
				(struct mpc_config_lintsrc *)mpt;
			MP_lintsrc_info(m);
			mpt += sizeof(*m);
			count += sizeof(*m);
			break;
		}
		default:
		{
			count = mpc->mpc_length;
			break;
		}
		}
#ifdef CONFIG_X86_NUMAQ
		++mpc_record;
#endif
	}
	setup_apic_routing();
	if (!num_processors)
		printk(KERN_ERR "SMP mptable: no processors registered!\n");
	return num_processors;
}

#ifdef CONFIG_X86_IO_APIC

static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = 0x4d0 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}
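
/*
 * The ELCR (Edge/Level Control Registers) of the two cascaded 8259As live
 * at I/O ports 0x4d0 (IRQs 0-7) and 0x4d1 (IRQs 8-15); a set bit marks the
 * corresponding ISA IRQ as level-triggered, which is what the helper above
 * reads out bit by bit.
 */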

static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_config_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.mpc_type = MP_INTSRC;
	intsrc.mpc_irqflag = 0;			/* conforming */
	intsrc.mpc_srcbus = 0;
	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;

	intsrc.mpc_irqtype = mp_INT;

	/*
	 *  If true, we have an ISA/PCI system with no IRQ entries
	 *  in the MP table. To prevent the PCI interrupts from being set up
	 *  incorrectly, we try to use the ELCR. The sanity check to see if
	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 *  never be level sensitive, so we simply see if the ELCR agrees.
	 *  If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
			printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
		else {
			printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			/* fall through */
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 *  If the ELCR indicates a level-sensitive interrupt, we
			 *  copy that information over to the MP table in the
			 *  irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i))
				intsrc.mpc_irqflag = 13;
			else
				intsrc.mpc_irqflag = 0;
		}

		intsrc.mpc_srcbusirq = i;
		intsrc.mpc_dstirq = i ? i : 2;		/* IRQ0 to INTIN2 */
		MP_intsrc_info(&intsrc);
	}

	intsrc.mpc_irqtype = mp_ExtINT;
	intsrc.mpc_srcbusirq = 0;
	intsrc.mpc_dstirq = 0;				/* 8259A to INTIN0 */
	MP_intsrc_info(&intsrc);
}

#endif
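
/*
 * mpc_default_type comes from the MP floating pointer's feature byte 1 and
 * selects one of the MP spec's default configurations: roughly, types 1-4
 * describe ISA/EISA/MCA systems without PCI (and a discrete 82489DX APIC),
 * while types 5-7 add a PCI bus and an integrated APIC, which is why the
 * code below fabricates a second (PCI) bus only for types above 4.
 */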

static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_config_processor processor;
	struct mpc_config_bus bus;
#ifdef CONFIG_X86_IO_APIC
	struct mpc_config_ioapic ioapic;
#endif
	struct mpc_config_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * local APIC has default address
	 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.mpc_type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.mpc_cpuflag = CPU_ENABLED;
	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
				   (boot_cpu_data.x86_model << 4) |
				   boot_cpu_data.x86_mask;
	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
	processor.mpc_reserved[0] = 0;
	processor.mpc_reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.mpc_apicid = i;
		MP_processor_info(&processor);
	}

	bus.mpc_type = MP_BUS;
	bus.mpc_busid = 0;
	switch (mpc_default_type) {
	default:
		printk("???\n");
		printk(KERN_ERR "Unknown standard configuration %d\n",
		       mpc_default_type);
		/* fall through */
	case 1:
	case 5:
		memcpy(bus.mpc_bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.mpc_bustype, "EISA  ", 6);
		break;
	case 4:
	case 7:
		memcpy(bus.mpc_bustype, "MCA   ", 6);
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.mpc_busid = 1;
		memcpy(bus.mpc_bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

#ifdef CONFIG_X86_IO_APIC
	ioapic.mpc_type = MP_IOAPIC;
	ioapic.mpc_apicid = 2;
	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.mpc_flags = MPC_APIC_USABLE;
	ioapic.mpc_apicaddr = 0xFEC00000;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);
#endif
	lintsrc.mpc_type = MP_LINTSRC;
	lintsrc.mpc_irqflag = 0;		/* conforming */
	lintsrc.mpc_srcbusid = 0;
	lintsrc.mpc_srcbusirq = 0;
	lintsrc.mpc_destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.mpc_irqtype = linttypes[i];
		lintsrc.mpc_destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

static struct intel_mp_floating *mpf_found;

/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init get_smp_config (void)
{
	struct intel_mp_floating *mpf = mpf_found;

	/*
	 * ACPI supports both logical (e.g. Hyper-Threading) and physical
	 * processors, where MPS only supports physical.
	 */
	if (acpi_lapic && acpi_ioapic) {
		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
		return;
	}
	else if (acpi_lapic)
		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");

	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
	if (mpf->mpf_feature2 & (1<<7)) {
		printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		printk(KERN_INFO "    Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}

	/*
	 * Now see if we need to read further.
	 */
	if (mpf->mpf_feature1 != 0) {

		printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
		construct_default_ISA_mptable(mpf->mpf_feature1);

	} else if (mpf->mpf_physptr) {

		/*
		 * Read the physical hardware table.  Anything here will
		 * override the defaults.
		 */
		if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
			smp_found_config = 0;
			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
			return;
		}

#ifdef CONFIG_X86_IO_APIC
		/*
		 * If there are no explicit MP IRQ entries, then we are
		 * broken.  We set up most of the low 16 IO-APIC pins to
		 * ISA defaults and hope it will work.
		 */
		if (!mp_irq_entries) {
			struct mpc_config_bus bus;

			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

			bus.mpc_type = MP_BUS;
			bus.mpc_busid = 0;
			memcpy(bus.mpc_bustype, "ISA   ", 6);
			MP_bus_info(&bus);

			construct_default_ioirq_mptable(0);
		}
#endif
	} else
		BUG();

	printk(KERN_INFO "Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
}
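
/*
 * The MP floating pointer structure is 16 bytes long, begins with the
 * "_MP_" signature (SMP_MAGIC_IDENT) and sits on a 16-byte boundary in one
 * of the areas scanned by find_smp_config(); that is why the loop below
 * advances bp by four 32-bit words (16 bytes) per iteration.
 */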

static int __init smp_scan_config (unsigned long base, unsigned long length)
{
	unsigned long *bp = phys_to_virt(base);
	struct intel_mp_floating *mpf;

	printk(KERN_INFO "Scan SMP from %p for %ld bytes.\n", bp, length);
	if (sizeof(*mpf) != 16)
		printk("Error: MPF size\n");

	while (length > 0) {
		mpf = (struct intel_mp_floating *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->mpf_length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->mpf_specification == 1)
		     || (mpf->mpf_specification == 4))) {

			smp_found_config = 1;
			printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n",
			       mpf, virt_to_phys(mpf));
			reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE,
					BOOTMEM_DEFAULT);
			if (mpf->mpf_physptr) {
				/*
				 * We cannot access the MPC table to compute
				 * its size yet, since only the first few
				 * megabytes are mapped at this point.
				 * The PC-9800 places its MPC table at the
				 * very end of physical memory, so blindly
				 * reserving PAGE_SIZE from mpf->mpf_physptr
				 * would trigger a BUG() in reserve_bootmem.
				 */
				unsigned long size = PAGE_SIZE;
				unsigned long end = max_low_pfn * PAGE_SIZE;
				if (mpf->mpf_physptr + size > end)
					size = end - mpf->mpf_physptr;
				reserve_bootmem(mpf->mpf_physptr, size,
						BOOTMEM_DEFAULT);
			}

			mpf_found = mpf;
			return 1;
		}
		bp += 4;
		length -= 16;
	}
	return 0;
}

void __init find_smp_config (void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 *	1) Scan the bottom 1K for a signature
	 *	2) Scan the top 1K of base RAM
	 *	3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0, 0x400) ||
	    smp_scan_config(639*0x400, 0x400) ||
	    smp_scan_config(0xF0000, 0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA/MCA bus machine with an
	 * extended bios data area.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

/* --------------------------------------------------------------------------
                            ACPI-based MP Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

void __init mp_register_lapic_address(u64 address)
{
	mp_lapic_addr = (unsigned long) address;

	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);

	if (boot_cpu_physical_apicid == -1U)
		boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());

	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
}

void __cpuinit mp_register_lapic (int id, u8 enabled)
{
	if (MAX_APICS - id <= 0) {
		printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
		       id, MAX_APICS);
		return;
	}

	if (!enabled) {
#ifdef CONFIG_X86_SMP
		++disabled_cpus;
#endif
		return;
	}

	generic_processor_info(id, GET_APIC_VERSION(apic_read(APIC_LVR)));
}

#ifdef CONFIG_X86_IO_APIC

#define MP_ISA_BUS		0
#define MP_MAX_IOAPIC_PIN	127

extern struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS];
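
/*
 * A GSI (Global System Interrupt) is ACPI's flat interrupt number: each
 * I/O APIC covers a contiguous GSI range starting at its gsi_base, and
 * mp_ioapic_routing[] (filled in by mp_register_ioapic() below) records
 * that range so a GSI can be mapped back to an (ioapic, pin) pair.
 */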

static int mp_find_ioapic (int gsi)
{
	int i = 0;

	/* Find the IOAPIC that manages this GSI. */
	for (i = 0; i < nr_ioapics; i++) {
		if ((gsi >= mp_ioapic_routing[i].gsi_base)
		    && (gsi <= mp_ioapic_routing[i].gsi_end))
			return i;
	}

	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);

	return -1;
}

static u8 uniq_ioapic_id(u8 id)
{
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return io_apic_get_unique_id(nr_ioapics, id);
	else
		return id;
}

void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
{
	int idx = 0;

	if (bad_ioapic(address))
		return;

	idx = nr_ioapics;

	mp_ioapics[idx].mpc_type = MP_IOAPIC;
	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
	mp_ioapics[idx].mpc_apicaddr = address;

	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
	mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id);
	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);

	/*
	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
	 */
	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
	mp_ioapic_routing[idx].gsi_base = gsi_base;
	mp_ioapic_routing[idx].gsi_end = gsi_base +
		io_apic_get_redir_entries(idx);

	printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
	       "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
	       mp_ioapics[idx].mpc_apicver,
	       mp_ioapics[idx].mpc_apicaddr,
	       mp_ioapic_routing[idx].gsi_base,
	       mp_ioapic_routing[idx].gsi_end);

	nr_ioapics++;
}

void __init
mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
{
	struct mpc_config_intsrc intsrc;
	int ioapic = -1;
	int pin = -1;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0)
		return;
	pin = gsi - mp_ioapic_routing[ioapic].gsi_base;

	/*
	 * TBD: This check is for faulty timer entries, where the override
	 *      erroneously sets the trigger to level, resulting in a HUGE
	 *      increase of timer interrupts!
	 */
	if ((bus_irq == 0) && (trigger == 3))
		trigger = 1;

	intsrc.mpc_type = MP_INTSRC;
	intsrc.mpc_irqtype = mp_INT;
	intsrc.mpc_irqflag = (trigger << 2) | polarity;
	intsrc.mpc_srcbus = MP_ISA_BUS;
	intsrc.mpc_srcbusirq = bus_irq;				/* IRQ */
	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	/* APIC ID */
	intsrc.mpc_dstirq = pin;				/* INTIN# */

	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);

	mp_irqs[mp_irq_entries] = intsrc;
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!\n");
}
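
/*
 * mp_override_legacy_irq() is driven by MADT "Interrupt Source Override"
 * entries, which describe ISA IRQs that are wired to a non-identity GSI or
 * with non-default polarity/trigger; the classic example is the PIT timer,
 * IRQ 0, being routed to GSI 2.  The (trigger << 2) | polarity packing used
 * above matches the MP-table irqflag layout used elsewhere in this file.
 */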

int es7000_plat;

void __init mp_config_acpi_legacy_irqs (void)
{
	struct mpc_config_intsrc intsrc;
	int i = 0;
	int ioapic = -1;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
	/*
	 * Fabricate the legacy ISA bus (bus #31).
	 */
	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
#endif
	set_bit(MP_ISA_BUS, mp_bus_not_pci);
	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);

	/*
	 * Older generations of ES7000 have no legacy identity mappings
	 */
	if (es7000_plat == 1)
		return;

	/*
	 * Locate the IOAPIC that manages the ISA IRQs (0-15).
	 */
	ioapic = mp_find_ioapic(0);
	if (ioapic < 0)
		return;

	intsrc.mpc_type = MP_INTSRC;
	intsrc.mpc_irqflag = 0;				/* Conforming */
	intsrc.mpc_srcbus = MP_ISA_BUS;
#ifdef CONFIG_X86_IO_APIC
	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
#endif
	/*
	 * Use the default configuration for the IRQs 0-15.  Unless
	 * overridden by (MADT) interrupt source override entries.
	 */
	for (i = 0; i < 16; i++) {
		int idx;

		for (idx = 0; idx < mp_irq_entries; idx++) {
			struct mpc_config_intsrc *irq = mp_irqs + idx;

			/* Do we already have a mapping for this ISA IRQ? */
			if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
				break;

			/* Do we already have a mapping for this IOAPIC pin */
			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
			    (irq->mpc_dstirq == i))
				break;
		}

		if (idx != mp_irq_entries) {
			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
			continue;			/* IRQ already used */
		}

		intsrc.mpc_irqtype = mp_INT;
		intsrc.mpc_srcbusirq = i;		/* Identity mapped */
		intsrc.mpc_dstirq = i;

		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
			intsrc.mpc_dstirq);

		mp_irqs[mp_irq_entries] = intsrc;
		if (++mp_irq_entries == MAX_IRQ_SOURCES)
			panic("Max # of irq sources exceeded!\n");
	}
}

#define MAX_GSI_NUM	4096
#define IRQ_COMPRESSION_START	64
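
/*
 * On 32-bit kernels the IRQ number space is much smaller than the possible
 * GSI space (up to MAX_GSI_NUM here), so level-triggered PCI interrupts
 * with GSI >= IRQ_COMPRESSION_START are "compressed": they are handed out
 * sequential IRQ numbers starting at IRQ_COMPRESSION_START and the mapping
 * is remembered in gsi_to_irq[] below.
 */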

int mp_register_gsi(u32 gsi, int triggering, int polarity)
{
	int ioapic = -1;
	int ioapic_pin = 0;
	int idx, bit = 0;
	static int pci_irq = IRQ_COMPRESSION_START;
	/*
	 * Mapping between Global System Interrupts, which
	 * represent all possible interrupts, and IRQs
	 * assigned to actual devices.
	 */
	static int gsi_to_irq[MAX_GSI_NUM];

	/* Don't set up the ACPI SCI because it's already set up */
	if (acpi_gbl_FADT.sci_interrupt == gsi)
		return gsi;

	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0) {
		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
		return gsi;
	}

	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;

	if (ioapic_renumber_irq)
		gsi = ioapic_renumber_irq(ioapic, gsi);

	/*
	 * Avoid pin reprogramming.  PRTs typically include entries
	 * with redundant pin->gsi mappings (but unique PCI devices);
	 * we only program the IOAPIC on the first.
	 */
	bit = ioapic_pin % 32;
	idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
	if (idx > 3) {
		printk(KERN_ERR "Invalid reference to IOAPIC pin "
		       "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
		       ioapic_pin);
		return gsi;
	}
	if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
		return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
	}

	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);

	/*
	 * For GSI >= 64, use IRQ compression
	 */
	if ((gsi >= IRQ_COMPRESSION_START)
	    && (triggering == ACPI_LEVEL_SENSITIVE)) {
		/*
		 * For PCI devices assign IRQs in order, avoiding gaps
		 * due to unused I/O APIC pins.
		 */
		int irq = gsi;
		if (gsi < MAX_GSI_NUM) {
			/*
			 * Retain the VIA chipset work-around (gsi > 15), but
			 * avoid a problem where the 8254 timer (IRQ0) is setup
			 * via an override (so it's not on pin 0 of the ioapic),
			 * and at the same time, the pin 0 interrupt is a PCI
			 * type.  The gsi > 15 test could cause these two pins
			 * to be shared as IRQ0, and they are not shareable.
			 * So test for this condition, and if necessary, avoid
			 * the pin collision.
			 */
			gsi = pci_irq++;
			/*
			 * Don't assign IRQ used by ACPI SCI
			 */
			if (gsi == acpi_gbl_FADT.sci_interrupt)
				gsi = pci_irq++;
			gsi_to_irq[irq] = gsi;
		} else {
			printk(KERN_ERR "GSI %u is too high\n", gsi);
			return gsi;
		}
	}

	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
				triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
				polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
	return gsi;
}

#endif /* CONFIG_X86_IO_APIC */
#endif /* CONFIG_ACPI */