x86: unify smp_read_mpc
[linux-2.6/mini2440.git] / arch/x86/kernel/mpparse_32.c
blob c185065c3ebcebaa78fbe4f83add12aae84f1d19
1 /*
2 * Intel Multiprocessor Specification 1.1 and 1.4
3 * compliant MP-table parsing routines.
5 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
6 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
8 * Fixes
9 * Erich Boleyn : MP v1.4 and additional changes.
10 * Alan Cox : Added EBDA scanning
11 * Ingo Molnar : various cleanups and rewrites
12 * Maciej W. Rozycki: Bits for default MP configurations
13 * Paul Diefenbaugh: Added full ACPI support
16 #include <linux/mm.h>
17 #include <linux/init.h>
18 #include <linux/acpi.h>
19 #include <linux/delay.h>
20 #include <linux/bootmem.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/mc146818rtc.h>
23 #include <linux/bitops.h>
25 #include <asm/smp.h>
26 #include <asm/acpi.h>
27 #include <asm/mtrr.h>
28 #include <asm/mpspec.h>
29 #include <asm/io_apic.h>
30 #include <asm/bios_ebda.h>
32 #include <mach_apic.h>
33 #include <mach_apicdef.h>
34 #include <mach_mpparse.h>
36 /* Have we found an MP table */
37 int smp_found_config;
40 * Various Linux-internal data structures created from the
41 * MP-table.
43 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
44 int mp_bus_id_to_type[MAX_MP_BUSSES];
45 #endif
46 DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
47 int mp_bus_id_to_pci_bus[MAX_MP_BUSSES] = {[0 ... MAX_MP_BUSSES - 1] = -1 };
48 static int mp_current_pci_id;
50 int pic_mode;
53 * Intel MP BIOS table parsing routines:
57 * Checksum an MP configuration block.
60 static int __init mpf_checksum(unsigned char *mp, int len)
62 int sum = 0;
64 while (len--)
65 sum += *mp++;
67 return sum & 0xFF;
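/*
 * Example (illustrative, not taken from this file): per the MP spec, every
 * byte of a consistent block sums to 0 modulo 256, so callers below simply
 * treat a non-zero return as corruption, along the lines of:
 *
 *	if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length))
 *		return 0;	-- non-zero sum, reject the table
 */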
70 #ifdef CONFIG_X86_NUMAQ
72 * Have to match translation table entries to main table entries by counter
73 * hence the mpc_record variable .... can't see a less disgusting way of
74 * doing this ....
77 static int mpc_record;
78 static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY]
79 __cpuinitdata;
80 #endif
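/*
 * Descriptive note: smp_read_mpc() resets mpc_record to 0 and increments it
 * after every main-table entry it parses, while smp_read_mpc_oem() fills
 * translation_table[] in the same order from the OEM table. That way
 * translation_table[mpc_record] is the NUMA-Q translation record matching
 * the main-table entry currently being handled (see MP_processor_info()
 * and MP_bus_info() below).
 */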
82 static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
84 int apicid;
85 char *bootup_cpu = "";
87 if (!(m->mpc_cpuflag & CPU_ENABLED)) {
88 disabled_cpus++;
89 return;
91 #ifdef CONFIG_X86_NUMAQ
92 apicid = mpc_apic_id(m, translation_table[mpc_record]);
93 #else
94 apicid = m->mpc_apicid;
95 #endif
96 if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
97 bootup_cpu = " (Bootup-CPU)";
98 boot_cpu_physical_apicid = m->mpc_apicid;
101 printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu);
102 generic_processor_info(apicid, m->mpc_apicver);
105 static void __init MP_bus_info(struct mpc_config_bus *m)
107 char str[7];
109 memcpy(str, m->mpc_bustype, 6);
110 str[6] = 0;
112 #ifdef CONFIG_X86_NUMAQ
113 mpc_oem_bus_info(m, str, translation_table[mpc_record]);
114 #else
115 Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
116 #endif
118 #if MAX_MP_BUSSES < 256
119 if (m->mpc_busid >= MAX_MP_BUSSES) {
120 printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
121 "is too large, max. supported is %d\n",
122 m->mpc_busid, str, MAX_MP_BUSSES - 1);
123 return;
125 #endif
127 if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
128 set_bit(m->mpc_busid, mp_bus_not_pci);
129 #if defined(CONFIG_EISA) || defined (CONFIG_MCA)
130 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
131 #endif
132 } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
133 #ifdef CONFIG_X86_NUMAQ
134 mpc_oem_pci_bus(m, translation_table[mpc_record]);
135 #endif
136 clear_bit(m->mpc_busid, mp_bus_not_pci);
137 mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
138 mp_current_pci_id++;
139 #if defined(CONFIG_EISA) || defined (CONFIG_MCA)
140 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
141 } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
142 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
143 } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) {
144 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
145 #endif
146 } else
147 printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
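/*
 * Example (illustrative): bus entries carry a six-character, blank-padded
 * type string. An entry such as { mpc_busid = 2, mpc_bustype = "PCI   " }
 * matches BUSTYPE_PCI above, clears bit 2 in mp_bus_not_pci and is handed
 * the next sequential logical PCI bus number from mp_current_pci_id, while
 * an "ISA   " entry only sets its bit in mp_bus_not_pci.
 */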
150 #ifdef CONFIG_X86_IO_APIC
152 static int bad_ioapic(unsigned long address)
154 if (nr_ioapics >= MAX_IO_APICS) {
155 printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
156 "(found %d)\n", MAX_IO_APICS, nr_ioapics);
157 panic("Recompile kernel with bigger MAX_IO_APICS!\n");
159 if (!address) {
160 printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
161 " found in table, skipping!\n");
162 return 1;
164 return 0;
167 static void __init MP_ioapic_info(struct mpc_config_ioapic *m)
169 if (!(m->mpc_flags & MPC_APIC_USABLE))
170 return;
172 printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
173 m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
175 if (bad_ioapic(m->mpc_apicaddr))
176 return;
178 mp_ioapics[nr_ioapics] = *m;
179 nr_ioapics++;
182 static void __init MP_intsrc_info(struct mpc_config_intsrc *m)
184 mp_irqs[mp_irq_entries] = *m;
185 Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
186 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
187 m->mpc_irqtype, m->mpc_irqflag & 3,
188 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
189 m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
190 if (++mp_irq_entries == MAX_IRQ_SOURCES)
191 panic("Max # of irq sources exceeded!!\n");
194 #endif
196 static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m)
198 Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
199 " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
200 m->mpc_irqtype, m->mpc_irqflag & 3,
201 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid,
202 m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
205 #ifdef CONFIG_X86_NUMAQ
206 static void __init MP_translation_info(struct mpc_config_translation *m)
208 printk(KERN_INFO
209 "Translation: record %d, type %d, quad %d, global %d, local %d\n",
210 mpc_record, m->trans_type, m->trans_quad, m->trans_global,
211 m->trans_local);
213 if (mpc_record >= MAX_MPC_ENTRY)
214 printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
215 else
216 translation_table[mpc_record] = m; /* stash this for later */
217 if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
218 node_set_online(m->trans_quad);
222 * Read/parse the MPC oem tables
225 static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable,
226 unsigned short oemsize)
228 int count = sizeof(*oemtable); /* the header size */
229 unsigned char *oemptr = ((unsigned char *)oemtable) + count;
231 mpc_record = 0;
232 printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n",
233 oemtable);
234 if (memcmp(oemtable->oem_signature, MPC_OEM_SIGNATURE, 4)) {
235 printk(KERN_WARNING
236 "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
237 oemtable->oem_signature[0], oemtable->oem_signature[1],
238 oemtable->oem_signature[2], oemtable->oem_signature[3]);
239 return;
241 if (mpf_checksum((unsigned char *)oemtable, oemtable->oem_length)) {
242 printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
243 return;
245 while (count < oemtable->oem_length) {
246 switch (*oemptr) {
247 case MP_TRANSLATION:
249 struct mpc_config_translation *m =
250 (struct mpc_config_translation *)oemptr;
251 MP_translation_info(m);
252 oemptr += sizeof(*m);
253 count += sizeof(*m);
254 ++mpc_record;
255 break;
257 default:
259 printk(KERN_WARNING
260 "Unrecognised OEM table entry type! - %d\n",
261 (int)*oemptr);
262 return;
268 static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
269 char *productid)
271 if (strncmp(oem, "IBM NUMA", 8))
272 printk(KERN_WARNING "Warning! May not be a NUMA-Q system!\n");
273 if (mpc->mpc_oemptr)
274 smp_read_mpc_oem((struct mp_config_oemtable *)mpc->mpc_oemptr,
275 mpc->mpc_oemsize);
277 #endif /* CONFIG_X86_NUMAQ */
280 * Read/parse the MPC
283 static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
285 char str[16];
286 char oem[10];
287 int count = sizeof(*mpc);
288 unsigned char *mpt = ((unsigned char *)mpc) + count;
290 if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) {
291 printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n",
292 mpc->mpc_signature[0], mpc->mpc_signature[1],
293 mpc->mpc_signature[2], mpc->mpc_signature[3]);
294 return 0;
296 if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) {
297 printk(KERN_ERR "MPTABLE: checksum error!\n");
298 return 0;
300 if (mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04) {
301 printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n",
302 mpc->mpc_spec);
303 return 0;
305 if (!mpc->mpc_lapic) {
306 printk(KERN_ERR "MPTABLE: null local APIC address!\n");
307 return 0;
309 memcpy(oem, mpc->mpc_oem, 8);
310 oem[8] = 0;
311 printk(KERN_INFO "MPTABLE: OEM ID: %s\n", oem);
313 memcpy(str, mpc->mpc_productid, 12);
314 str[12] = 0;
317 #ifdef CONFIG_X86_32
318 mps_oem_check(mpc, oem, str);
319 #endif
320 printk(KERN_INFO "MPTABLE: Product ID: %s\n", str);
322 printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->mpc_lapic);
324 /* save the local APIC address, it might be non-default */
325 if (!acpi_lapic)
326 mp_lapic_addr = mpc->mpc_lapic;
328 if (early)
329 return 1;
332 * Now process the configuration blocks.
334 #ifdef CONFIG_X86_NUMAQ
335 mpc_record = 0;
336 #endif
337 while (count < mpc->mpc_length) {
338 switch (*mpt) {
339 case MP_PROCESSOR:
341 struct mpc_config_processor *m =
342 (struct mpc_config_processor *)mpt;
343 /* ACPI may have already provided this data */
344 if (!acpi_lapic)
345 MP_processor_info(m);
346 mpt += sizeof(*m);
347 count += sizeof(*m);
348 break;
350 case MP_BUS:
352 struct mpc_config_bus *m =
353 (struct mpc_config_bus *)mpt;
354 MP_bus_info(m);
355 mpt += sizeof(*m);
356 count += sizeof(*m);
357 break;
359 case MP_IOAPIC:
361 #ifdef CONFIG_X86_IO_APIC
362 struct mpc_config_ioapic *m =
363 (struct mpc_config_ioapic *)mpt;
364 MP_ioapic_info(m);
365 #endif
366 mpt += sizeof(struct mpc_config_ioapic);
367 count += sizeof(struct mpc_config_ioapic);
368 break;
370 case MP_INTSRC:
372 #ifdef CONFIG_X86_IO_APIC
373 struct mpc_config_intsrc *m =
374 (struct mpc_config_intsrc *)mpt;
376 MP_intsrc_info(m);
377 #endif
378 mpt += sizeof(struct mpc_config_intsrc);
379 count += sizeof(struct mpc_config_intsrc);
380 break;
382 case MP_LINTSRC:
384 struct mpc_config_lintsrc *m =
385 (struct mpc_config_lintsrc *)mpt;
386 MP_lintsrc_info(m);
387 mpt += sizeof(*m);
388 count += sizeof(*m);
389 break;
391 default:
393 count = mpc->mpc_length;
394 break;
397 #ifdef CONFIG_X86_NUMAQ
398 ++mpc_record;
399 #endif
401 setup_apic_routing();
402 if (!num_processors)
403 printk(KERN_ERR "MPTABLE: no processors registered!\n");
404 return num_processors;
407 #ifdef CONFIG_X86_IO_APIC
409 static int __init ELCR_trigger(unsigned int irq)
411 unsigned int port;
413 port = 0x4d0 + (irq >> 3);
414 return (inb(port) >> (irq & 7)) & 1;
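/*
 * Example (illustrative): the ELCR is a pair of 8-bit registers at I/O
 * ports 0x4d0/0x4d1, one bit per ISA IRQ. For IRQ 10 the routine above
 * reads port 0x4d0 + (10 >> 3) = 0x4d1 and tests bit 10 & 7 = 2; a set
 * bit means the line is level-triggered, a clear bit means edge-triggered.
 */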
417 static void __init construct_default_ioirq_mptable(int mpc_default_type)
419 struct mpc_config_intsrc intsrc;
420 int i;
421 int ELCR_fallback = 0;
423 intsrc.mpc_type = MP_INTSRC;
424 intsrc.mpc_irqflag = 0; /* conforming */
425 intsrc.mpc_srcbus = 0;
426 intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
428 intsrc.mpc_irqtype = mp_INT;
431 * If true, we have an ISA/PCI system with no IRQ entries
432 * in the MP table. To prevent the PCI interrupts from being set up
433 * incorrectly, we try to use the ELCR. The sanity check to see if
434 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
435 * never be level sensitive, so we simply see if the ELCR agrees.
436 * If it does, we assume it's valid.
438 if (mpc_default_type == 5) {
439 printk(KERN_INFO
440 "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
442 if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2)
443 || ELCR_trigger(13))
444 printk(KERN_WARNING
445 "ELCR contains invalid data... not using ELCR\n");
446 else {
447 printk(KERN_INFO
448 "Using ELCR to identify PCI interrupts\n");
449 ELCR_fallback = 1;
453 for (i = 0; i < 16; i++) {
454 switch (mpc_default_type) {
455 case 2:
456 if (i == 0 || i == 13)
457 continue; /* IRQ0 & IRQ13 not connected */
458 /* fall through */
459 default:
460 if (i == 2)
461 continue; /* IRQ2 is never connected */
464 if (ELCR_fallback) {
466 * If the ELCR indicates a level-sensitive interrupt, we
467 * copy that information over to the MP table in the
468 * irqflag field (level sensitive, active high polarity).
470 if (ELCR_trigger(i))
471 intsrc.mpc_irqflag = 13;
472 else
473 intsrc.mpc_irqflag = 0;
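/*
 * Worked example: mpc_irqflag packs polarity in bits 1:0 and trigger mode
 * in bits 3:2, so 13 == 0b1101 means trigger 3 (level) with polarity 1
 * (active high), while 0 leaves both fields as "conforms to bus spec".
 */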
476 intsrc.mpc_srcbusirq = i;
477 intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
478 MP_intsrc_info(&intsrc);
481 intsrc.mpc_irqtype = mp_ExtINT;
482 intsrc.mpc_srcbusirq = 0;
483 intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
484 MP_intsrc_info(&intsrc);
487 #endif
489 static inline void __init construct_default_ISA_mptable(int mpc_default_type)
491 struct mpc_config_processor processor;
492 struct mpc_config_bus bus;
493 #ifdef CONFIG_X86_IO_APIC
494 struct mpc_config_ioapic ioapic;
495 #endif
496 struct mpc_config_lintsrc lintsrc;
497 int linttypes[2] = { mp_ExtINT, mp_NMI };
498 int i;
501 * local APIC has default address
503 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
506 * 2 CPUs, numbered 0 & 1.
508 processor.mpc_type = MP_PROCESSOR;
509 /* Either an integrated APIC or a discrete 82489DX. */
510 processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
511 processor.mpc_cpuflag = CPU_ENABLED;
512 processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
513 (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
514 processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
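/*
 * Example (illustrative): the packing above mirrors the CPUID signature
 * layout, e.g. family 6, model 8, stepping 10 gives
 * (6 << 8) | (8 << 4) | 10 = 0x068a, and mpc_featureflag is simply the
 * CPUID leaf 1 EDX feature word cached in x86_capability[0].
 */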
515 processor.mpc_reserved[0] = 0;
516 processor.mpc_reserved[1] = 0;
517 for (i = 0; i < 2; i++) {
518 processor.mpc_apicid = i;
519 MP_processor_info(&processor);
522 bus.mpc_type = MP_BUS;
523 bus.mpc_busid = 0;
524 switch (mpc_default_type) {
525 default:
526 printk("???\n");
527 printk(KERN_ERR "Unknown standard configuration %d\n",
528 mpc_default_type);
529 /* fall through */
530 case 1:
531 case 5:
532 memcpy(bus.mpc_bustype, "ISA ", 6);
533 break;
534 case 2:
535 case 6:
536 case 3:
537 memcpy(bus.mpc_bustype, "EISA ", 6);
538 break;
539 case 4:
540 case 7:
541 memcpy(bus.mpc_bustype, "MCA ", 6);
543 MP_bus_info(&bus);
544 if (mpc_default_type > 4) {
545 bus.mpc_busid = 1;
546 memcpy(bus.mpc_bustype, "PCI ", 6);
547 MP_bus_info(&bus);
550 #ifdef CONFIG_X86_IO_APIC
551 ioapic.mpc_type = MP_IOAPIC;
552 ioapic.mpc_apicid = 2;
553 ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
554 ioapic.mpc_flags = MPC_APIC_USABLE;
555 ioapic.mpc_apicaddr = 0xFEC00000;
556 MP_ioapic_info(&ioapic);
559 * We set up most of the low 16 IO-APIC pins according to MPS rules.
561 construct_default_ioirq_mptable(mpc_default_type);
562 #endif
563 lintsrc.mpc_type = MP_LINTSRC;
564 lintsrc.mpc_irqflag = 0; /* conforming */
565 lintsrc.mpc_srcbusid = 0;
566 lintsrc.mpc_srcbusirq = 0;
567 lintsrc.mpc_destapic = MP_APIC_ALL;
568 for (i = 0; i < 2; i++) {
569 lintsrc.mpc_irqtype = linttypes[i];
570 lintsrc.mpc_destapiclint = i;
571 MP_lintsrc_info(&lintsrc);
575 static struct intel_mp_floating *mpf_found;
578 * Scan the memory blocks for an SMP configuration block.
580 static void __init __get_smp_config(unsigned early)
582 struct intel_mp_floating *mpf = mpf_found;
584 if (acpi_lapic && early)
585 return;
588 * ACPI supports both logical (e.g. Hyper-Threading) and physical
589 * processors, whereas MPS only supports physical processors.
591 if (acpi_lapic && acpi_ioapic) {
592 printk(KERN_INFO
593 "Using ACPI (MADT) for SMP configuration information\n");
594 return;
595 } else if (acpi_lapic)
596 printk(KERN_INFO
597 "Using ACPI for processor (LAPIC) configuration information\n");
599 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
600 mpf->mpf_specification);
601 if (mpf->mpf_feature2 & (1 << 7)) {
602 printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
603 pic_mode = 1;
604 } else {
605 printk(KERN_INFO " Virtual Wire compatibility mode.\n");
606 pic_mode = 0;
610 * Now see if we need to read further.
612 if (mpf->mpf_feature1 != 0) {
613 if (early) {
615 * local APIC has default address
617 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
618 return;
621 printk(KERN_INFO "Default MP configuration #%d\n",
622 mpf->mpf_feature1);
623 construct_default_ISA_mptable(mpf->mpf_feature1);
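/*
 * Note: a non-zero mpf_feature1 selects one of the MP spec's default
 * configurations (1-7), which describe two-CPU systems without an MPC
 * table; e.g. type 5 is the ISA + PCI variant with an integrated local
 * APIC, which is why construct_default_ISA_mptable() fabricates the
 * table entries by hand.
 */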
625 } else if (mpf->mpf_physptr) {
628 * Read the physical hardware table. Anything here will
629 * override the defaults.
631 if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) {
632 smp_found_config = 0;
633 printk(KERN_ERR
634 "BIOS bug, MP table errors detected!...\n");
635 printk(KERN_ERR
636 "... disabling SMP support. (tell your hw vendor)\n");
637 return;
640 if (early)
641 return;
642 #ifdef CONFIG_X86_IO_APIC
644 * If there are no explicit MP IRQ entries, then we are
645 * broken. We set up most of the low 16 IO-APIC pins to
646 * ISA defaults and hope it will work.
648 if (!mp_irq_entries) {
649 struct mpc_config_bus bus;
651 printk(KERN_ERR
652 "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
654 bus.mpc_type = MP_BUS;
655 bus.mpc_busid = 0;
656 memcpy(bus.mpc_bustype, "ISA ", 6);
657 MP_bus_info(&bus);
659 construct_default_ioirq_mptable(0);
661 #endif
662 } else
663 BUG();
665 if (!early)
666 printk(KERN_INFO "Processors: %d\n", num_processors);
668 * Only use the first configuration found.
672 void __init early_get_smp_config(void)
674 __get_smp_config(1);
677 void __init get_smp_config(void)
679 __get_smp_config(0);
682 static int __init smp_scan_config(unsigned long base, unsigned long length,
683 unsigned reserve)
685 unsigned long *bp = phys_to_virt(base);
686 struct intel_mp_floating *mpf;
688 printk(KERN_INFO "Scan SMP from %p for %ld bytes.\n", bp, length);
689 if (sizeof(*mpf) != 16)
690 printk("Error: MPF size\n");
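/*
 * Descriptive note: the MP floating pointer structure is a 16-byte,
 * 16-byte-aligned block that starts with the ASCII signature "_MP_"
 * (SMP_MAGIC_IDENT). bp is an unsigned long *, so bp += 4 advances the
 * scan 16 bytes at a time while length is reduced accordingly.
 */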
692 while (length > 0) {
693 mpf = (struct intel_mp_floating *)bp;
694 if ((*bp == SMP_MAGIC_IDENT) &&
695 (mpf->mpf_length == 1) &&
696 !mpf_checksum((unsigned char *)bp, 16) &&
697 ((mpf->mpf_specification == 1)
698 || (mpf->mpf_specification == 4))) {
700 smp_found_config = 1;
701 printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n",
702 mpf, virt_to_phys(mpf));
703 reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE,
704 BOOTMEM_DEFAULT);
705 if (mpf->mpf_physptr) {
707 * We cannot access the MPC table to compute its
708 * size yet, as only the first few megabytes of
709 * memory are mapped at this point.
710 * The PC-9800 places its MPC table at the very end
711 * of physical memory, so simply reserving PAGE_SIZE
712 * from mpf->mpf_physptr would trigger a BUG() in
713 * reserve_bootmem.
715 unsigned long size = PAGE_SIZE;
716 unsigned long end = max_low_pfn * PAGE_SIZE;
717 if (mpf->mpf_physptr + size > end)
718 size = end - mpf->mpf_physptr;
719 reserve_bootmem(mpf->mpf_physptr, size,
720 BOOTMEM_DEFAULT);
723 mpf_found = mpf;
724 return 1;
726 bp += 4;
727 length -= 16;
729 return 0;
732 static void __init __find_smp_config(unsigned reserve)
734 unsigned int address;
737 * FIXME: Linux assumes you have 640K of base ram..
738 * this continues the error...
740 * 1) Scan the bottom 1K for a signature
741 * 2) Scan the top 1K of base RAM
742 * 3) Scan the 64K of bios
744 if (smp_scan_config(0x0, 0x400, reserve) ||
745 smp_scan_config(639 * 0x400, 0x400, reserve) ||
746 smp_scan_config(0xF0000, 0x10000, reserve))
747 return;
749 * If it is an SMP machine we should know now, unless the
750 * configuration is in an EISA/MCA bus machine with an
751 * extended bios data area.
753 * There is a real-mode segmented pointer to the 4K EBDA
754 * area at 0x40E; calculate its address and scan that area here.
756 * NOTE! There are Linux loaders that will corrupt the EBDA
757 * area, and as such this kind of SMP config may be less
758 * trustworthy, simply because the SMP table may have been
759 * stomped on during early boot. These loaders are buggy and
760 * should be fixed.
762 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
765 address = get_bios_ebda();
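/*
 * Example (illustrative): 0x40E in the BIOS data area holds the real-mode
 * segment of the EBDA, and get_bios_ebda() shifts it left by 4 to form a
 * physical address; a typical value of 0x9fc0 therefore yields an EBDA at
 * 0x9fc00, of which only the first 1K (0x400 bytes) is scanned below.
 */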
766 if (address)
767 smp_scan_config(address, 0x400, reserve);
770 void __init early_find_smp_config(void)
772 __find_smp_config(0);
775 void __init find_smp_config(void)
777 __find_smp_config(1);
780 /* --------------------------------------------------------------------------
781 ACPI-based MP Configuration
782 -------------------------------------------------------------------------- */
784 #ifdef CONFIG_ACPI
786 #ifdef CONFIG_X86_IO_APIC
788 #define MP_ISA_BUS 0
789 #define MP_MAX_IOAPIC_PIN 127
791 extern struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS];
793 static int mp_find_ioapic(int gsi)
795 int i = 0;
797 /* Find the IOAPIC that manages this GSI. */
798 for (i = 0; i < nr_ioapics; i++) {
799 if ((gsi >= mp_ioapic_routing[i].gsi_base)
800 && (gsi <= mp_ioapic_routing[i].gsi_end))
801 return i;
804 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
806 return -1;
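/*
 * Example (illustrative): with two IOAPICs registered as gsi_base/gsi_end
 * 0-23 and 24-47, mp_find_ioapic(30) returns 1, and callers below compute
 * the pin within that IOAPIC as 30 - mp_ioapic_routing[1].gsi_base = 6.
 */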
809 static u8 uniq_ioapic_id(u8 id)
811 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
812 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
813 return io_apic_get_unique_id(nr_ioapics, id);
814 else
815 return id;
818 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
820 int idx = 0;
822 if (bad_ioapic(address))
823 return;
825 idx = nr_ioapics;
827 mp_ioapics[idx].mpc_type = MP_IOAPIC;
828 mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
829 mp_ioapics[idx].mpc_apicaddr = address;
831 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
832 mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id);
833 mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
836 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
837 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
839 mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
840 mp_ioapic_routing[idx].gsi_base = gsi_base;
841 mp_ioapic_routing[idx].gsi_end = gsi_base +
842 io_apic_get_redir_entries(idx);
844 printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
845 "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
846 mp_ioapics[idx].mpc_apicver,
847 mp_ioapics[idx].mpc_apicaddr,
848 mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);
850 nr_ioapics++;
853 void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
855 struct mpc_config_intsrc intsrc;
856 int ioapic = -1;
857 int pin = -1;
860 * Convert 'gsi' to 'ioapic.pin'.
862 ioapic = mp_find_ioapic(gsi);
863 if (ioapic < 0)
864 return;
865 pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
868 * TBD: This check is for faulty timer entries, where the override
869 * erroneously sets the trigger to level, resulting in a HUGE
870 * increase of timer interrupts!
872 if ((bus_irq == 0) && (trigger == 3))
873 trigger = 1;
875 intsrc.mpc_type = MP_INTSRC;
876 intsrc.mpc_irqtype = mp_INT;
877 intsrc.mpc_irqflag = (trigger << 2) | polarity;
878 intsrc.mpc_srcbus = MP_ISA_BUS;
879 intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
880 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
881 intsrc.mpc_dstirq = pin; /* INTIN# */
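/*
 * Worked example: an override specifying level trigger (3) and active-low
 * polarity (3) encodes as (3 << 2) | 3 = 0xf; for the bus_irq 0 timer case
 * the check above forces the trigger back to edge (1), giving 0x7 instead.
 */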
883 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
884 intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
885 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
886 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
888 mp_irqs[mp_irq_entries] = intsrc;
889 if (++mp_irq_entries == MAX_IRQ_SOURCES)
890 panic("Max # of irq sources exceeded!\n");
893 int es7000_plat;
895 void __init mp_config_acpi_legacy_irqs(void)
897 struct mpc_config_intsrc intsrc;
898 int i = 0;
899 int ioapic = -1;
901 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
903 * Fabricate the legacy ISA bus (bus #31).
905 mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
906 #endif
907 set_bit(MP_ISA_BUS, mp_bus_not_pci);
908 Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
911 * Older generations of ES7000 have no legacy identity mappings
913 if (es7000_plat == 1)
914 return;
917 * Locate the IOAPIC that manages the ISA IRQs (0-15).
919 ioapic = mp_find_ioapic(0);
920 if (ioapic < 0)
921 return;
923 intsrc.mpc_type = MP_INTSRC;
924 intsrc.mpc_irqflag = 0; /* Conforming */
925 intsrc.mpc_srcbus = MP_ISA_BUS;
926 #ifdef CONFIG_X86_IO_APIC
927 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
928 #endif
930 * Use the default configuration for IRQs 0-15 unless
931 * overridden by (MADT) interrupt source override entries.
933 for (i = 0; i < 16; i++) {
934 int idx;
936 for (idx = 0; idx < mp_irq_entries; idx++) {
937 struct mpc_config_intsrc *irq = mp_irqs + idx;
939 /* Do we already have a mapping for this ISA IRQ? */
940 if (irq->mpc_srcbus == MP_ISA_BUS
941 && irq->mpc_srcbusirq == i)
942 break;
944 /* Do we already have a mapping for this IOAPIC pin */
945 if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
946 (irq->mpc_dstirq == i))
947 break;
950 if (idx != mp_irq_entries) {
951 printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
952 continue; /* IRQ already used */
955 intsrc.mpc_irqtype = mp_INT;
956 intsrc.mpc_srcbusirq = i; /* Identity mapped */
957 intsrc.mpc_dstirq = i;
959 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
960 "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
961 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
962 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
963 intsrc.mpc_dstirq);
965 mp_irqs[mp_irq_entries] = intsrc;
966 if (++mp_irq_entries == MAX_IRQ_SOURCES)
967 panic("Max # of irq sources exceeded!\n");
971 #define MAX_GSI_NUM 4096
972 #define IRQ_COMPRESSION_START 64
974 int mp_register_gsi(u32 gsi, int triggering, int polarity)
976 int ioapic = -1;
977 int ioapic_pin = 0;
978 int idx, bit = 0;
979 static int pci_irq = IRQ_COMPRESSION_START;
981 * Mapping between Global System Interrupts, which
982 * represent all possible interrupts, and IRQs
983 * assigned to actual devices.
985 static int gsi_to_irq[MAX_GSI_NUM];
987 /* Don't set up the ACPI SCI because it's already set up */
988 if (acpi_gbl_FADT.sci_interrupt == gsi)
989 return gsi;
991 ioapic = mp_find_ioapic(gsi);
992 if (ioapic < 0) {
993 printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
994 return gsi;
997 ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
999 if (ioapic_renumber_irq)
1000 gsi = ioapic_renumber_irq(ioapic, gsi);
1003 * Avoid pin reprogramming. PRTs typically include entries
1004 * with redundant pin->gsi mappings (but unique PCI devices);
1005 * we only program the IOAPIC on the first.
1007 bit = ioapic_pin % 32;
1008 idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
1009 if (idx > 3) {
1010 printk(KERN_ERR "Invalid reference to IOAPIC pin "
1011 "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
1012 ioapic_pin);
1013 return gsi;
1015 if ((1 << bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
1016 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
1017 mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
1018 return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
1021 mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1 << bit);
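/*
 * Example (illustrative): for ioapic_pin 37 the bookkeeping above uses
 * idx = 37 / 32 = 1 and bit = 37 % 32 = 5, so pin_programmed[1] bit 5
 * records that the pin was already routed, and a later PRT entry for the
 * same pin returns the previously assigned IRQ instead of reprogramming it.
 */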
1024 * For GSI >= 64, use IRQ compression
1026 if ((gsi >= IRQ_COMPRESSION_START)
1027 && (triggering == ACPI_LEVEL_SENSITIVE)) {
1029 * For PCI devices assign IRQs in order, avoiding gaps
1030 * due to unused I/O APIC pins.
1032 int irq = gsi;
1033 if (gsi < MAX_GSI_NUM) {
1035 * Retain the VIA chipset work-around (gsi > 15), but
1036 * avoid a problem where the 8254 timer (IRQ0) is setup
1037 * via an override (so it's not on pin 0 of the ioapic),
1038 * and at the same time, the pin 0 interrupt is a PCI
1039 * type. The gsi > 15 test could cause these two pins
1040 * to be shared as IRQ0, and they are not shareable.
1041 * So test for this condition, and if necessary, avoid
1042 * the pin collision.
1044 gsi = pci_irq++;
1046 * Don't assign IRQ used by ACPI SCI
1048 if (gsi == acpi_gbl_FADT.sci_interrupt)
1049 gsi = pci_irq++;
1050 gsi_to_irq[irq] = gsi;
1051 } else {
1052 printk(KERN_ERR "GSI %u is too high\n", gsi);
1053 return gsi;
1057 io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
1058 triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
1059 polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
1060 return gsi;
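/*
 * Example (illustrative): a level-triggered PCI interrupt on, say, GSI 72
 * (>= IRQ_COMPRESSION_START) is assigned the next free compressed IRQ
 * starting at 64 and remembered in gsi_to_irq[72], so repeated calls for
 * the same pin return the same IRQ; GSIs below 64 keep their identity
 * mapping, and any value colliding with the ACPI SCI is skipped.
 */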
1063 #endif /* CONFIG_X86_IO_APIC */
1064 #endif /* CONFIG_ACPI */