x86: unify smp_scan_config
arch/x86/kernel/mpparse_32.c
/*
 *      Intel Multiprocessor Specification 1.1 and 1.4
 *      compliant MP-table parsing routines.
 *
 *      (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *      (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *      Fixes
 *              Erich Boleyn     :      MP v1.4 and additional changes.
 *              Alan Cox         :      Added EBDA scanning
 *              Ingo Molnar      :      various cleanups and rewrites
 *              Maciej W. Rozycki:      Bits for default MP configurations
 *              Paul Diefenbaugh :      Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>

#include <asm/smp.h>
#include <asm/acpi.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/io_apic.h>
#include <asm/bios_ebda.h>

#include <mach_apic.h>
#include <mach_apicdef.h>
#include <mach_mpparse.h>
/* Have we found an MP table */
int smp_found_config;

/*
 * Various Linux-internal data structures created from the
 * MP-table.
 */
#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
int mp_bus_id_to_pci_bus[MAX_MP_BUSSES] = {[0 ... MAX_MP_BUSSES - 1] = -1 };

static int mp_current_pci_id;

int pic_mode;
/*
 * Intel MP BIOS table parsing routines:
 */

/*
 * Checksum an MP configuration block.
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
        int sum = 0;

        while (len--)
                sum += *mp++;

        return sum & 0xFF;
}
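
/*
 * Note on the checksum above: the MP spec requires that all bytes of the
 * floating pointer structure and of the configuration table sum to zero
 * (mod 256), so a non-zero return from mpf_checksum() marks the block as
 * corrupt and the callers below reject it.
 */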

#ifdef CONFIG_X86_NUMAQ
/*
 * Have to match translation table entries to main table entries by counter
 * hence the mpc_record variable .... can't see a less disgusting way of
 * doing this ....
 */

static int mpc_record;
static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY]
    __cpuinitdata;
#endif

static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
{
        int apicid;
        char *bootup_cpu = "";

        if (!(m->mpc_cpuflag & CPU_ENABLED)) {
                disabled_cpus++;
                return;
        }
#ifdef CONFIG_X86_NUMAQ
        apicid = mpc_apic_id(m, translation_table[mpc_record]);
#else
        apicid = m->mpc_apicid;
#endif
        if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
                bootup_cpu = " (Bootup-CPU)";
                boot_cpu_physical_apicid = m->mpc_apicid;
        }

        printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu);
        generic_processor_info(apicid, m->mpc_apicver);
}

static void __init MP_bus_info(struct mpc_config_bus *m)
{
        char str[7];

        memcpy(str, m->mpc_bustype, 6);
        str[6] = 0;

#ifdef CONFIG_X86_NUMAQ
        mpc_oem_bus_info(m, str, translation_table[mpc_record]);
#else
        Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
#endif

#if MAX_MP_BUSSES < 256
        if (m->mpc_busid >= MAX_MP_BUSSES) {
                printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
                       " is too large, max. supported is %d\n",
                       m->mpc_busid, str, MAX_MP_BUSSES - 1);
                return;
        }
#endif

        if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
                set_bit(m->mpc_busid, mp_bus_not_pci);
#if defined(CONFIG_EISA) || defined (CONFIG_MCA)
                mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
#endif
        } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
#ifdef CONFIG_X86_NUMAQ
                mpc_oem_pci_bus(m, translation_table[mpc_record]);
#endif
                clear_bit(m->mpc_busid, mp_bus_not_pci);
                mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
                mp_current_pci_id++;
#if defined(CONFIG_EISA) || defined (CONFIG_MCA)
                mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
        } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
                mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
        } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) {
                mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
#endif
        } else
                printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
}

#ifdef CONFIG_X86_IO_APIC

static int bad_ioapic(unsigned long address)
{
        if (nr_ioapics >= MAX_IO_APICS) {
                printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
                       "(found %d)\n", MAX_IO_APICS, nr_ioapics);
                panic("Recompile kernel with bigger MAX_IO_APICS!\n");
        }
        if (!address) {
                printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
                       " found in table, skipping!\n");
                return 1;
        }
        return 0;
}

static void __init MP_ioapic_info(struct mpc_config_ioapic *m)
{
        if (!(m->mpc_flags & MPC_APIC_USABLE))
                return;

        printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
               m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);

        if (bad_ioapic(m->mpc_apicaddr))
                return;

        mp_ioapics[nr_ioapics] = *m;
        nr_ioapics++;
}

static void __init MP_intsrc_info(struct mpc_config_intsrc *m)
{
        mp_irqs[mp_irq_entries] = *m;
        Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
                " IRQ %02x, APIC ID %x, APIC INT %02x\n",
                m->mpc_irqtype, m->mpc_irqflag & 3,
                (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
                m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
        if (++mp_irq_entries == MAX_IRQ_SOURCES)
                panic("Max # of irq sources exceeded!!\n");
}

#endif
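
/*
 * The mpc_irqflag decoding used in the debug output above and below follows
 * the MP specification: bits 0-1 are the polarity (00 = conforms to bus,
 * 01 = active high, 11 = active low) and bits 2-3 are the trigger mode
 * (00 = conforms to bus, 01 = edge, 11 = level).
 */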

static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m)
{
        Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
                " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
                m->mpc_irqtype, m->mpc_irqflag & 3,
                (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid,
                m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
}

#ifdef CONFIG_X86_NUMAQ
static void __init MP_translation_info(struct mpc_config_translation *m)
{
        printk(KERN_INFO
               "Translation: record %d, type %d, quad %d, global %d, local %d\n",
               mpc_record, m->trans_type, m->trans_quad, m->trans_global,
               m->trans_local);

        if (mpc_record >= MAX_MPC_ENTRY)
                printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
        else
                translation_table[mpc_record] = m;      /* stash this for later */
        if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
                node_set_online(m->trans_quad);
}

/*
 * Read/parse the MPC oem tables
 */

static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable,
                                    unsigned short oemsize)
{
        int count = sizeof(*oemtable);  /* the header size */
        unsigned char *oemptr = ((unsigned char *)oemtable) + count;

        mpc_record = 0;
        printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n",
               oemtable);
        if (memcmp(oemtable->oem_signature, MPC_OEM_SIGNATURE, 4)) {
                printk(KERN_WARNING
                       "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
                       oemtable->oem_signature[0], oemtable->oem_signature[1],
                       oemtable->oem_signature[2], oemtable->oem_signature[3]);
                return;
        }
        if (mpf_checksum((unsigned char *)oemtable, oemtable->oem_length)) {
                printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
                return;
        }
        while (count < oemtable->oem_length) {
                switch (*oemptr) {
                case MP_TRANSLATION:
                        {
                                struct mpc_config_translation *m =
                                    (struct mpc_config_translation *)oemptr;
                                MP_translation_info(m);
                                oemptr += sizeof(*m);
                                count += sizeof(*m);
                                ++mpc_record;
                                break;
                        }
                default:
                        {
                                printk(KERN_WARNING
                                       "Unrecognised OEM table entry type! - %d\n",
                                       (int)*oemptr);
                                return;
                        }
                }
        }
}

static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
                                 char *productid)
{
        if (strncmp(oem, "IBM NUMA", 8))
                printk("Warning! May not be a NUMA-Q system!\n");
        if (mpc->mpc_oemptr)
                smp_read_mpc_oem((struct mp_config_oemtable *)mpc->mpc_oemptr,
                                 mpc->mpc_oemsize);
}
#endif /* CONFIG_X86_NUMAQ */

/*
 * Read/parse the MPC
 */

static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
{
        char str[16];
        char oem[10];
        int count = sizeof(*mpc);
        unsigned char *mpt = ((unsigned char *)mpc) + count;

        if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) {
                printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n",
                       mpc->mpc_signature[0], mpc->mpc_signature[1],
                       mpc->mpc_signature[2], mpc->mpc_signature[3]);
                return 0;
        }
        if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) {
                printk(KERN_ERR "MPTABLE: checksum error!\n");
                return 0;
        }
        if (mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04) {
                printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n",
                       mpc->mpc_spec);
                return 0;
        }
        if (!mpc->mpc_lapic) {
                printk(KERN_ERR "MPTABLE: null local APIC address!\n");
                return 0;
        }
        memcpy(oem, mpc->mpc_oem, 8);
        oem[8] = 0;
        printk(KERN_INFO "MPTABLE: OEM ID: %s ", oem);

        memcpy(str, mpc->mpc_productid, 12);
        str[12] = 0;
        printk("Product ID: %s ", str);

#ifdef CONFIG_X86_32
        mps_oem_check(mpc, oem, str);
#endif

        printk(KERN_INFO "MPTABLE: Product ID: %s ", str);

        printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->mpc_lapic);

        /* save the local APIC address, it might be non-default */
        if (!acpi_lapic)
                mp_lapic_addr = mpc->mpc_lapic;

        if (early)
                return 1;

        /*
         * Now process the configuration blocks.
         */
#ifdef CONFIG_X86_NUMAQ
        mpc_record = 0;
#endif
        while (count < mpc->mpc_length) {
                switch (*mpt) {
                case MP_PROCESSOR:
                        {
                                struct mpc_config_processor *m =
                                    (struct mpc_config_processor *)mpt;
                                /* ACPI may have already provided this data */
                                if (!acpi_lapic)
                                        MP_processor_info(m);
                                mpt += sizeof(*m);
                                count += sizeof(*m);
                                break;
                        }
                case MP_BUS:
                        {
                                struct mpc_config_bus *m =
                                    (struct mpc_config_bus *)mpt;
                                MP_bus_info(m);
                                mpt += sizeof(*m);
                                count += sizeof(*m);
                                break;
                        }
                case MP_IOAPIC:
                        {
#ifdef CONFIG_X86_IO_APIC
                                struct mpc_config_ioapic *m =
                                    (struct mpc_config_ioapic *)mpt;
                                MP_ioapic_info(m);
#endif
                                mpt += sizeof(struct mpc_config_ioapic);
                                count += sizeof(struct mpc_config_ioapic);
                                break;
                        }
                case MP_INTSRC:
                        {
#ifdef CONFIG_X86_IO_APIC
                                struct mpc_config_intsrc *m =
                                    (struct mpc_config_intsrc *)mpt;

                                MP_intsrc_info(m);
#endif
                                mpt += sizeof(struct mpc_config_intsrc);
                                count += sizeof(struct mpc_config_intsrc);
                                break;
                        }
                case MP_LINTSRC:
                        {
                                struct mpc_config_lintsrc *m =
                                    (struct mpc_config_lintsrc *)mpt;
                                MP_lintsrc_info(m);
                                mpt += sizeof(*m);
                                count += sizeof(*m);
                                break;
                        }
                default:
                        {
                                count = mpc->mpc_length;
                                break;
                        }
                }
#ifdef CONFIG_X86_NUMAQ
                ++mpc_record;
#endif
        }
        setup_apic_routing();
        if (!num_processors)
                printk(KERN_ERR "MPTABLE: no processors registered!\n");
        return num_processors;
}

#ifdef CONFIG_X86_IO_APIC

static int __init ELCR_trigger(unsigned int irq)
{
        unsigned int port;

        port = 0x4d0 + (irq >> 3);
        return (inb(port) >> (irq & 7)) & 1;
}
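
/*
 * ELCR_trigger() above reads the 8259 Edge/Level Control Registers at I/O
 * ports 0x4d0 (IRQ 0-7) and 0x4d1 (IRQ 8-15); each bit gives the trigger
 * mode of one ISA IRQ, 0 = edge and 1 = level.
 */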

static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
        struct mpc_config_intsrc intsrc;
        int i;
        int ELCR_fallback = 0;

        intsrc.mpc_type = MP_INTSRC;
        intsrc.mpc_irqflag = 0; /* conforming */
        intsrc.mpc_srcbus = 0;
        intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;

        intsrc.mpc_irqtype = mp_INT;

        /*
         *  If true, we have an ISA/PCI system with no IRQ entries
         *  in the MP table. To prevent the PCI interrupts from being set up
         *  incorrectly, we try to use the ELCR. The sanity check to see if
         *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
         *  never be level sensitive, so we simply see if the ELCR agrees.
         *  If it does, we assume it's valid.
         */
        if (mpc_default_type == 5) {
                printk(KERN_INFO "ISA/PCI bus type with no IRQ information... "
                       "falling back to ELCR\n");

                if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
                    ELCR_trigger(13))
                        printk(KERN_ERR "ELCR contains invalid data... "
                               "not using ELCR\n");
                else {
                        printk(KERN_INFO
                               "Using ELCR to identify PCI interrupts\n");
                        ELCR_fallback = 1;
                }
        }

        for (i = 0; i < 16; i++) {
                switch (mpc_default_type) {
                case 2:
                        if (i == 0 || i == 13)
                                continue;       /* IRQ0 & IRQ13 not connected */
                        /* fall through */
                default:
                        if (i == 2)
                                continue;       /* IRQ2 is never connected */
                }

                if (ELCR_fallback) {
                        /*
                         *  If the ELCR indicates a level-sensitive interrupt, we
                         *  copy that information over to the MP table in the
                         *  irqflag field (level sensitive, active high polarity).
                         */
                        if (ELCR_trigger(i))
                                intsrc.mpc_irqflag = 13;
                        else
                                intsrc.mpc_irqflag = 0;
                }

                intsrc.mpc_srcbusirq = i;
                intsrc.mpc_dstirq = i ? i : 2;  /* IRQ0 to INTIN2 */
                MP_intsrc_info(&intsrc);
        }

        intsrc.mpc_irqtype = mp_ExtINT;
        intsrc.mpc_srcbusirq = 0;
        intsrc.mpc_dstirq = 0;  /* 8259A to INTIN0 */
        MP_intsrc_info(&intsrc);
}

#endif
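
/*
 * construct_default_ISA_mptable() below synthesizes an MP configuration for
 * the default system types defined by the MP spec (mpc_default_type 1-7):
 * two CPUs, one ISA/EISA/MCA bus and, for types 5-7, an additional PCI bus,
 * which is why the code checks for mpc_default_type > 4.  Types above 4 also
 * imply an integrated local APIC (version 0x10) rather than a discrete
 * 82489DX (version 0x01).
 */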

static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
        struct mpc_config_processor processor;
        struct mpc_config_bus bus;
#ifdef CONFIG_X86_IO_APIC
        struct mpc_config_ioapic ioapic;
#endif
        struct mpc_config_lintsrc lintsrc;
        int linttypes[2] = { mp_ExtINT, mp_NMI };
        int i;

        /*
         * local APIC has default address
         */
        mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

        /*
         * 2 CPUs, numbered 0 & 1.
         */
        processor.mpc_type = MP_PROCESSOR;
        /* Either an integrated APIC or a discrete 82489DX. */
        processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
        processor.mpc_cpuflag = CPU_ENABLED;
        processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
            (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
        processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
        processor.mpc_reserved[0] = 0;
        processor.mpc_reserved[1] = 0;
        for (i = 0; i < 2; i++) {
                processor.mpc_apicid = i;
                MP_processor_info(&processor);
        }

        bus.mpc_type = MP_BUS;
        bus.mpc_busid = 0;
        switch (mpc_default_type) {
        default:
                printk(KERN_ERR "???\nUnknown standard configuration %d\n",
                       mpc_default_type);
                /* fall through */
        case 1:
        case 5:
                memcpy(bus.mpc_bustype, "ISA   ", 6);
                break;
        case 2:
        case 6:
        case 3:
                memcpy(bus.mpc_bustype, "EISA  ", 6);
                break;
        case 4:
        case 7:
                memcpy(bus.mpc_bustype, "MCA   ", 6);
        }
        MP_bus_info(&bus);
        if (mpc_default_type > 4) {
                bus.mpc_busid = 1;
                memcpy(bus.mpc_bustype, "PCI   ", 6);
                MP_bus_info(&bus);
        }

#ifdef CONFIG_X86_IO_APIC
        ioapic.mpc_type = MP_IOAPIC;
        ioapic.mpc_apicid = 2;
        ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
        ioapic.mpc_flags = MPC_APIC_USABLE;
        ioapic.mpc_apicaddr = 0xFEC00000;
        MP_ioapic_info(&ioapic);

        /*
         * We set up most of the low 16 IO-APIC pins according to MPS rules.
         */
        construct_default_ioirq_mptable(mpc_default_type);
#endif
        lintsrc.mpc_type = MP_LINTSRC;
        lintsrc.mpc_irqflag = 0;        /* conforming */
        lintsrc.mpc_srcbusid = 0;
        lintsrc.mpc_srcbusirq = 0;
        lintsrc.mpc_destapic = MP_APIC_ALL;
        for (i = 0; i < 2; i++) {
                lintsrc.mpc_irqtype = linttypes[i];
                lintsrc.mpc_destapiclint = i;
                MP_lintsrc_info(&lintsrc);
        }
}

static struct intel_mp_floating *mpf_found;

/*
 * Scan the memory blocks for an SMP configuration block.
 */
static void __init __get_smp_config(unsigned early)
{
        struct intel_mp_floating *mpf = mpf_found;

        if (acpi_lapic && early)
                return;
        /*
         * ACPI supports both logical (e.g. Hyper-Threading) and physical
         * processors, where MPS only supports physical.
         */
        if (acpi_lapic && acpi_ioapic) {
                printk(KERN_INFO "Using ACPI (MADT) for SMP configuration "
                       "information\n");
                return;
        } else if (acpi_lapic)
                printk(KERN_INFO "Using ACPI for processor (LAPIC) "
                       "configuration information\n");

        printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
               mpf->mpf_specification);
#ifdef CONFIG_X86_32
        if (mpf->mpf_feature2 & (1 << 7)) {
                printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
                pic_mode = 1;
        } else {
                printk(KERN_INFO "    Virtual Wire compatibility mode.\n");
                pic_mode = 0;
        }
#endif
        /*
         * Now see if we need to read further.
         */
        if (mpf->mpf_feature1 != 0) {
                if (early) {
                        /*
                         * local APIC has default address
                         */
                        mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
                        return;
                }

                printk(KERN_INFO "Default MP configuration #%d\n",
                       mpf->mpf_feature1);
                construct_default_ISA_mptable(mpf->mpf_feature1);

        } else if (mpf->mpf_physptr) {

                /*
                 * Read the physical hardware table.  Anything here will
                 * override the defaults.
                 */
                if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) {
                        smp_found_config = 0;
                        printk(KERN_ERR
                               "BIOS bug, MP table errors detected!...\n");
                        printk(KERN_ERR "... disabling SMP support. "
                               "(tell your hw vendor)\n");
                        return;
                }

                if (early)
                        return;
#ifdef CONFIG_X86_IO_APIC
                /*
                 * If there are no explicit MP IRQ entries, then we are
                 * broken.  We set up most of the low 16 IO-APIC pins to
                 * ISA defaults and hope it will work.
                 */
                if (!mp_irq_entries) {
                        struct mpc_config_bus bus;

                        printk(KERN_ERR "BIOS bug, no explicit IRQ entries, "
                               "using default mptable. "
                               "(tell your hw vendor)\n");

                        bus.mpc_type = MP_BUS;
                        bus.mpc_busid = 0;
                        memcpy(bus.mpc_bustype, "ISA   ", 6);
                        MP_bus_info(&bus);

                        construct_default_ioirq_mptable(0);
                }
#endif
        } else
                BUG();

        if (!early)
                printk(KERN_INFO "Processors: %d\n", num_processors);
        /*
         * Only use the first configuration found.
         */
}

void __init early_get_smp_config(void)
{
        __get_smp_config(1);
}

void __init get_smp_config(void)
{
        __get_smp_config(0);
}
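
/*
 * smp_scan_config() below looks for the MP floating pointer structure: a
 * 16-byte block starting with the ASCII signature "_MP_" (SMP_MAGIC_IDENT),
 * whose mpf_length field counts 16-byte paragraphs (hence the check for 1)
 * and whose bytes sum to zero.  The spec requires 16-byte alignment, which
 * is why the scan advances bp by four 32-bit words at a time.
 */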

static int __init smp_scan_config(unsigned long base, unsigned long length,
                                  unsigned reserve)
{
        extern void __bad_mpf_size(void);
        unsigned int *bp = phys_to_virt(base);
        struct intel_mp_floating *mpf;

        Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length);
        if (sizeof(*mpf) != 16)
                __bad_mpf_size();

        while (length > 0) {
                mpf = (struct intel_mp_floating *)bp;
                if ((*bp == SMP_MAGIC_IDENT) &&
                    (mpf->mpf_length == 1) &&
                    !mpf_checksum((unsigned char *)bp, 16) &&
                    ((mpf->mpf_specification == 1)
                     || (mpf->mpf_specification == 4))) {

                        smp_found_config = 1;
                        mpf_found = mpf;
#ifdef CONFIG_X86_32
                        printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n",
                               mpf, virt_to_phys(mpf));
                        reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE,
                                        BOOTMEM_DEFAULT);
                        if (mpf->mpf_physptr) {
                                /*
                                 * We cannot access the MPC table to compute
                                 * its size yet, as only a few megabytes from
                                 * the bottom are mapped at this point.
                                 * The PC-9800's MPC table sits at the very
                                 * end of physical memory, so blindly
                                 * reserving PAGE_SIZE from mpf->mpf_physptr
                                 * would trigger a BUG() in reserve_bootmem.
                                 */
                                unsigned long size = PAGE_SIZE;
                                unsigned long end = max_low_pfn * PAGE_SIZE;
                                if (mpf->mpf_physptr + size > end)
                                        size = end - mpf->mpf_physptr;
                                reserve_bootmem(mpf->mpf_physptr, size,
                                                BOOTMEM_DEFAULT);
                        }

#else
                        if (!reserve)
                                return 1;

                        reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE);
                        if (mpf->mpf_physptr)
                                reserve_bootmem_generic(mpf->mpf_physptr,
                                                        PAGE_SIZE);
#endif
                        return 1;
                }
                bp += 4;
                length -= 16;
        }
        return 0;
}

static void __init __find_smp_config(unsigned reserve)
{
        unsigned int address;

        /*
         * FIXME: Linux assumes you have 640K of base ram..
         * this continues the error...
         *
         * 1) Scan the bottom 1K for a signature
         * 2) Scan the top 1K of base RAM
         * 3) Scan the 64K of bios
         */
        if (smp_scan_config(0x0, 0x400, reserve) ||
            smp_scan_config(639 * 0x400, 0x400, reserve) ||
            smp_scan_config(0xF0000, 0x10000, reserve))
                return;
        /*
         * If it is an SMP machine we should know now, unless the
         * configuration is in an EISA/MCA bus machine with an
         * extended bios data area.
         *
         * There is a real-mode segmented pointer pointing to the
         * 4K EBDA area at 0x40E, calculate and scan it here.
         *
         * NOTE! There are Linux loaders that will corrupt the EBDA
         * area, and as such this kind of SMP config may be less
         * trustworthy, simply because the SMP table may have been
         * stomped on during early boot. These loaders are buggy and
         * should be fixed.
         *
         * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
         */

        address = get_bios_ebda();
        if (address)
                smp_scan_config(address, 0x400, reserve);
}

void __init early_find_smp_config(void)
{
        __find_smp_config(0);
}

void __init find_smp_config(void)
{
        __find_smp_config(1);
}

/* --------------------------------------------------------------------------
                            ACPI-based MP Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

#ifdef CONFIG_X86_IO_APIC

#define MP_ISA_BUS              0
#define MP_MAX_IOAPIC_PIN       127

extern struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS];

static int mp_find_ioapic(int gsi)
{
        int i = 0;

        /* Find the IOAPIC that manages this GSI. */
        for (i = 0; i < nr_ioapics; i++) {
                if ((gsi >= mp_ioapic_routing[i].gsi_base)
                    && (gsi <= mp_ioapic_routing[i].gsi_end))
                        return i;
        }

        printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);

        return -1;
}

static u8 uniq_ioapic_id(u8 id)
{
        if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
            !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
                return io_apic_get_unique_id(nr_ioapics, id);
        else
                return id;
}

void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
{
        int idx = 0;

        if (bad_ioapic(address))
                return;

        idx = nr_ioapics;

        mp_ioapics[idx].mpc_type = MP_IOAPIC;
        mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
        mp_ioapics[idx].mpc_apicaddr = address;

        set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
        mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id);
        mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);

        /*
         * Build basic GSI lookup table to facilitate gsi->io_apic lookups
         * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
         */
        mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
        mp_ioapic_routing[idx].gsi_base = gsi_base;
        mp_ioapic_routing[idx].gsi_end = gsi_base +
            io_apic_get_redir_entries(idx);

        printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
               "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
               mp_ioapics[idx].mpc_apicver,
               mp_ioapics[idx].mpc_apicaddr,
               mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);

        nr_ioapics++;
}
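
/*
 * Note that gsi_end above is gsi_base plus the value returned by
 * io_apic_get_redir_entries(), i.e. the index of the last redirection
 * entry, so mp_find_ioapic() treats the [gsi_base, gsi_end] range as
 * inclusive when mapping a GSI back to its IOAPIC.
 */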

void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
{
        struct mpc_config_intsrc intsrc;
        int ioapic = -1;
        int pin = -1;

        /*
         * Convert 'gsi' to 'ioapic.pin'.
         */
        ioapic = mp_find_ioapic(gsi);
        if (ioapic < 0)
                return;
        pin = gsi - mp_ioapic_routing[ioapic].gsi_base;

        /*
         * TBD: This check is for faulty timer entries, where the override
         *      erroneously sets the trigger to level, resulting in a HUGE
         *      increase of timer interrupts!
         */
        if ((bus_irq == 0) && (trigger == 3))
                trigger = 1;

        intsrc.mpc_type = MP_INTSRC;
        intsrc.mpc_irqtype = mp_INT;
        intsrc.mpc_irqflag = (trigger << 2) | polarity;
        intsrc.mpc_srcbus = MP_ISA_BUS;
        intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
        intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;     /* APIC ID */
        intsrc.mpc_dstirq = pin;        /* INTIN# */

        Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
                intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
                (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
                intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);

        mp_irqs[mp_irq_entries] = intsrc;
        if (++mp_irq_entries == MAX_IRQ_SOURCES)
                panic("Max # of irq sources exceeded!\n");
}

int es7000_plat;

void __init mp_config_acpi_legacy_irqs(void)
{
        struct mpc_config_intsrc intsrc;
        int i = 0;
        int ioapic = -1;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
        /*
         * Fabricate the legacy ISA bus (bus #31).
         */
        mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
#endif
        set_bit(MP_ISA_BUS, mp_bus_not_pci);
        Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);

        /*
         * Older generations of ES7000 have no legacy identity mappings
         */
        if (es7000_plat == 1)
                return;

        /*
         * Locate the IOAPIC that manages the ISA IRQs (0-15).
         */
        ioapic = mp_find_ioapic(0);
        if (ioapic < 0)
                return;

        intsrc.mpc_type = MP_INTSRC;
        intsrc.mpc_irqflag = 0; /* Conforming */
        intsrc.mpc_srcbus = MP_ISA_BUS;
#ifdef CONFIG_X86_IO_APIC
        intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
#endif
        /*
         * Use the default configuration for the IRQs 0-15.  Unless
         * overridden by (MADT) interrupt source override entries.
         */
        for (i = 0; i < 16; i++) {
                int idx;

                for (idx = 0; idx < mp_irq_entries; idx++) {
                        struct mpc_config_intsrc *irq = mp_irqs + idx;

                        /* Do we already have a mapping for this ISA IRQ? */
                        if (irq->mpc_srcbus == MP_ISA_BUS
                            && irq->mpc_srcbusirq == i)
                                break;

                        /* Do we already have a mapping for this IOAPIC pin */
                        if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
                            (irq->mpc_dstirq == i))
                                break;
                }

                if (idx != mp_irq_entries) {
                        printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
                        continue;       /* IRQ already used */
                }

                intsrc.mpc_irqtype = mp_INT;
                intsrc.mpc_srcbusirq = i;       /* Identity mapped */
                intsrc.mpc_dstirq = i;

                Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
                        "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
                        (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
                        intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
                        intsrc.mpc_dstirq);

                mp_irqs[mp_irq_entries] = intsrc;
                if (++mp_irq_entries == MAX_IRQ_SOURCES)
                        panic("Max # of irq sources exceeded!\n");
        }
}
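
/*
 * mp_register_gsi() below compresses sparse level-triggered PCI GSIs:
 * GSIs below IRQ_COMPRESSION_START (64) stay identity mapped, while higher
 * level-triggered GSIs are assigned sequential IRQ numbers starting at 64,
 * skipping the ACPI SCI.  gsi_to_irq[] remembers the assignment so that a
 * pin that is registered again returns the same IRQ.
 */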

#define MAX_GSI_NUM             4096
#define IRQ_COMPRESSION_START   64

int mp_register_gsi(u32 gsi, int triggering, int polarity)
{
        int ioapic = -1;
        int ioapic_pin = 0;
        int idx, bit = 0;
        static int pci_irq = IRQ_COMPRESSION_START;
        /*
         * Mapping between Global System Interrupts, which
         * represent all possible interrupts, and IRQs
         * assigned to actual devices.
         */
        static int gsi_to_irq[MAX_GSI_NUM];

        /* Don't set up the ACPI SCI because it's already set up */
        if (acpi_gbl_FADT.sci_interrupt == gsi)
                return gsi;

        ioapic = mp_find_ioapic(gsi);
        if (ioapic < 0) {
                printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
                return gsi;
        }

        ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;

        if (ioapic_renumber_irq)
                gsi = ioapic_renumber_irq(ioapic, gsi);

        /*
         * Avoid pin reprogramming.  PRTs typically include entries
         * with redundant pin->gsi mappings (but unique PCI devices);
         * we only program the IOAPIC on the first.
         */
        bit = ioapic_pin % 32;
        idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
        if (idx > 3) {
                printk(KERN_ERR "Invalid reference to IOAPIC pin "
                       "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
                       ioapic_pin);
                return gsi;
        }
        if ((1 << bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
                Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
                        mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
                return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
        }

        mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1 << bit);

        /*
         * For GSI >= 64, use IRQ compression
         */
        if ((gsi >= IRQ_COMPRESSION_START)
            && (triggering == ACPI_LEVEL_SENSITIVE)) {
                /*
                 * For PCI devices assign IRQs in order, avoiding gaps
                 * due to unused I/O APIC pins.
                 */
                int irq = gsi;
                if (gsi < MAX_GSI_NUM) {
                        /*
                         * Retain the VIA chipset work-around (gsi > 15), but
                         * avoid a problem where the 8254 timer (IRQ0) is setup
                         * via an override (so it's not on pin 0 of the ioapic),
                         * and at the same time, the pin 0 interrupt is a PCI
                         * type.  The gsi > 15 test could cause these two pins
                         * to be shared as IRQ0, and they are not shareable.
                         * So test for this condition, and if necessary, avoid
                         * the pin collision.
                         */
                        gsi = pci_irq++;
                        /*
                         * Don't assign IRQ used by ACPI SCI
                         */
                        if (gsi == acpi_gbl_FADT.sci_interrupt)
                                gsi = pci_irq++;
                        gsi_to_irq[irq] = gsi;
                } else {
                        printk(KERN_ERR "GSI %u is too high\n", gsi);
                        return gsi;
                }
        }

        io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
                                triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
                                polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
        return gsi;
}

#endif /* CONFIG_X86_IO_APIC */
#endif /* CONFIG_ACPI */