arch/x86/kernel/mpparse.c

/*
 *	Intel Multiprocessor Specification 1.1 and 1.4
 *	compliant MP-table parsing routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *	(c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/pci.h>
#include <linux/irqdomain.h>

#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/pgalloc.h>
#include <asm/io_apic.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/smp.h>

#include <asm/apic.h>

/*
 * Checksum an MP configuration block.
 */
static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}
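
/*
 * Illustrative note (an addition, not part of the original file): per the MP
 * spec, all bytes of a valid structure sum to zero modulo 256, so a non-zero
 * return from mpf_checksum() over a whole table means it is corrupt.  The
 * writers further down regenerate the field with the same helper, e.g.:
 *
 *	mpc->checksum = 0;
 *	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);
 *
 * which makes a subsequent full-table checksum come out to zero again.
 */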

int __init default_mpc_apic_id(struct mpc_cpu *m)
{
	return m->apicid;
}

static void __init MP_processor_info(struct mpc_cpu *m)
{
	int apicid;
	char *bootup_cpu = "";

	if (!(m->cpuflag & CPU_ENABLED)) {
		disabled_cpus++;
		return;
	}

	apicid = x86_init.mpparse.mpc_apic_id(m);

	if (m->cpuflag & CPU_BOOTPROCESSOR) {
		bootup_cpu = " (Bootup-CPU)";
		boot_cpu_physical_apicid = m->apicid;
	}

	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
	generic_processor_info(apicid, m->apicver);
}

#ifdef CONFIG_X86_IO_APIC
void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
	memcpy(str, m->bustype, 6);
	str[6] = 0;
	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
}

static void __init MP_bus_info(struct mpc_bus *m)
{
	char str[7];

	x86_init.mpparse.mpc_oem_bus_info(m, str);

#if MAX_MP_BUSSES < 256
	if (m->busid >= MAX_MP_BUSSES) {
		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
			m->busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	set_bit(m->busid, mp_bus_not_pci);
	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
		if (x86_init.mpparse.mpc_oem_pci_bus)
			x86_init.mpparse.mpc_oem_pci_bus(m);

		clear_bit(m->busid, mp_bus_not_pci);
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
#endif
	} else
		pr_warn("Unknown bustype %s - ignoring\n", str);
}

static struct irq_domain_ops mp_ioapic_irqdomain_ops = {
	.map = mp_irqdomain_map,
	.unmap = mp_irqdomain_unmap,
};

static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
	struct ioapic_domain_cfg cfg = {
		.type = IOAPIC_DOMAIN_LEGACY,
		.ops = &mp_ioapic_irqdomain_ops,
	};

	if (m->flags & MPC_APIC_USABLE)
		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
}

static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
	apic_printk(APIC_VERBOSE,
		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
		mp_irq->irqtype, mp_irq->irqflag & 3,
		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}

#else /* CONFIG_X86_IO_APIC */
static inline void __init MP_bus_info(struct mpc_bus *m) {}
static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
#endif /* CONFIG_X86_IO_APIC */

static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
{
	apic_printk(APIC_VERBOSE,
		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
		m->srcbusirq, m->destapic, m->destapiclint);
}

/*
 * Read/parse the MPC
 */
static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
{

	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
		       mpc->signature[0], mpc->signature[1],
		       mpc->signature[2], mpc->signature[3]);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
		pr_err("MPTABLE: checksum error!\n");
		return 0;
	}
	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
		return 0;
	}
	if (!mpc->lapic) {
		pr_err("MPTABLE: null local APIC address!\n");
		return 0;
	}
	memcpy(oem, mpc->oem, 8);
	oem[8] = 0;
	pr_info("MPTABLE: OEM ID: %s\n", oem);

	memcpy(str, mpc->productid, 12);
	str[12] = 0;

	pr_info("MPTABLE: Product ID: %s\n", str);

	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);

	return 1;
}
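
/*
 * Illustrative note (an addition, not part of the original file): the spec
 * byte in the MPC header encodes the table revision, 0x01 for MP spec 1.1
 * and 0x04 for MP spec 1.4, which is why only those two values pass the
 * version check in smp_check_mpc() above.
 */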

static void skip_entry(unsigned char **ptr, int *count, int size)
{
	*ptr += size;
	*count += size;
}

static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
{
	pr_err("Your mptable is wrong, contact your HW vendor!\n");
	pr_cont("type %x\n", *mpt);
	print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
			1, mpc, mpc->length, 1);
}

void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }

static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
	char str[16];
	char oem[10];

	int count = sizeof(*mpc);
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	/* Initialize the lapic mapping */
	if (!acpi_lapic)
		register_lapic_address(mpc->lapic);

	if (early)
		return 1;

	if (mpc->oemptr)
		x86_init.mpparse.smp_read_mpc_oem(mpc);

	/*
	 * Now process the configuration blocks.
	 */
	x86_init.mpparse.mpc_record(0);

	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			/* ACPI may have already provided this data */
			if (!acpi_lapic)
				MP_processor_info((struct mpc_cpu *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			MP_bus_info((struct mpc_bus *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			MP_ioapic_info((struct mpc_ioapic *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			mp_save_irq((struct mpc_intsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			count = mpc->length;
			break;
		}
		x86_init.mpparse.mpc_record(1);
	}

	if (!num_processors)
		pr_err("MPTABLE: no processors registered!\n");
	return num_processors;
}

#ifdef CONFIG_X86_IO_APIC

static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = 0x4d0 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}
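
/*
 * Illustrative note (an addition, not part of the original file): the ELCR
 * is a pair of 8259A edge/level control registers at I/O ports 0x4d0
 * (IRQ 0-7) and 0x4d1 (IRQ 8-15), one bit per IRQ, with a set bit meaning
 * the line is level triggered.  ELCR_trigger() selects the port with
 * irq >> 3 and the bit with irq & 7; IRQ 9, for example, reads bit 1 of
 * port 0x4d1.
 */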

static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.type = MP_INTSRC;
	intsrc.irqflag = 0;	/* conforming */
	intsrc.srcbus = 0;
	intsrc.dstapic = mpc_ioapic_id(0);

	intsrc.irqtype = mp_INT;

	/*
	 *  If true, we have an ISA/PCI system with no IRQ entries
	 *  in the MP table. To prevent the PCI interrupts from being set up
	 *  incorrectly, we try to use the ELCR. The sanity check to see if
	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 *  never be level sensitive, so we simply see if the ELCR agrees.
	 *  If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
		    ELCR_trigger(13))
			pr_err("ELCR contains invalid data... not using ELCR\n");
		else {
			pr_info("Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			/* fall through */
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 *  If the ELCR indicates a level-sensitive interrupt, we
			 *  copy that information over to the MP table in the
			 *  irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i))
				intsrc.irqflag = 13;
			else
				intsrc.irqflag = 0;
		}

		intsrc.srcbusirq = i;
		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
		mp_save_irq(&intsrc);
	}

	intsrc.irqtype = mp_ExtINT;
	intsrc.srcbusirq = 0;
	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
	mp_save_irq(&intsrc);
}
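
/*
 * Illustrative note (an addition, not part of the original file): irqflag
 * packs the MP-spec polarity into bits 0-1 and the trigger mode into bits
 * 2-3.  The value 13 (0b1101) used above is therefore level triggered
 * (3 << 2) with active-high polarity (1), matching the comment; 0 means
 * "conforms to the bus", and 0x0f (used later in this file) means level
 * triggered, active low.
 */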

static void __init construct_ioapic_table(int mpc_default_type)
{
	struct mpc_ioapic ioapic;
	struct mpc_bus bus;

	bus.type = MP_BUS;
	bus.busid = 0;
	switch (mpc_default_type) {
	default:
		pr_err("???\nUnknown standard configuration %d\n",
		       mpc_default_type);
		/* fall through */
	case 1:
	case 5:
		memcpy(bus.bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.bustype, "EISA  ", 6);
		break;
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.busid = 1;
		memcpy(bus.bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	ioapic.type	= MP_IOAPIC;
	ioapic.apicid	= 2;
	ioapic.apicver	= mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.flags	= MPC_APIC_USABLE;
	ioapic.apicaddr	= IO_APIC_DEFAULT_PHYS_BASE;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);
}
#else
static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif

static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_cpu processor;
	struct mpc_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * local APIC has default address
	 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.cpuflag = CPU_ENABLED;
	processor.cpufeature = (boot_cpu_data.x86 << 8) |
	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
	processor.featureflag = boot_cpu_data.x86_capability[0];
	processor.reserved[0] = 0;
	processor.reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.apicid = i;
		MP_processor_info(&processor);
	}

	construct_ioapic_table(mpc_default_type);

	lintsrc.type = MP_LINTSRC;
	lintsrc.irqflag = 0;		/* conforming */
	lintsrc.srcbusid = 0;
	lintsrc.srcbusirq = 0;
	lintsrc.destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.irqtype = linttypes[i];
		lintsrc.destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

static struct mpf_intel *mpf_found;

static unsigned long __init get_mpc_size(unsigned long physptr)
{
	struct mpc_table *mpc;
	unsigned long size;

	mpc = early_ioremap(physptr, PAGE_SIZE);
	size = mpc->length;
	early_iounmap(mpc, PAGE_SIZE);
	apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size);

	return size;
}

static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
{
	struct mpc_table *mpc;
	unsigned long size;

	size = get_mpc_size(mpf->physptr);
	mpc = early_ioremap(mpf->physptr, size);
	/*
	 * Read the physical hardware table.  Anything here will
	 * override the defaults.
	 */
	if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
		smp_found_config = 0;
#endif
		pr_err("BIOS bug, MP table errors detected!...\n");
		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
		early_iounmap(mpc, size);
		return -1;
	}

	early_iounmap(mpc, size);

	if (early)
		return -1;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * If there are no explicit MP IRQ entries, then we are
	 * broken.  We set up most of the low 16 IO-APIC pins to
	 * ISA defaults and hope it will work.
	 */
	if (!mp_irq_entries) {
		struct mpc_bus bus;

		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

		bus.type = MP_BUS;
		bus.busid = 0;
		memcpy(bus.bustype, "ISA   ", 6);
		MP_bus_info(&bus);

		construct_default_ioirq_mptable(0);
	}
#endif

	return 0;
}

/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init default_get_smp_config(unsigned int early)
{
	struct mpf_intel *mpf = mpf_found;

	if (!mpf)
		return;

	if (acpi_lapic && early)
		return;

	/*
	 * MPS doesn't support hyperthreading, aka only have
	 * thread 0 apic id in MPS table
	 */
	if (acpi_lapic && acpi_ioapic)
		return;

	pr_info("Intel MultiProcessor Specification v1.%d\n",
		mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
	if (mpf->feature2 & (1 << 7)) {
		pr_info(" IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		pr_info(" Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}
#endif
	/*
	 * Now see if we need to read further.
	 */
	if (mpf->feature1 != 0) {
		if (early) {
			/*
			 * local APIC has default address
			 */
			mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
			return;
		}

		pr_info("Default MP configuration #%d\n", mpf->feature1);
		construct_default_ISA_mptable(mpf->feature1);

	} else if (mpf->physptr) {
		if (check_physptr(mpf, early))
			return;
	} else
		BUG();

	if (!early)
		pr_info("Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
}

static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}

static int __init smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned int *bp = phys_to_virt(base);
	struct mpf_intel *mpf;
	unsigned long mem;

	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
		    base, base + length - 1);
	BUILD_BUG_ON(sizeof(*mpf) != 16);

	while (length > 0) {
		mpf = (struct mpf_intel *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->specification == 1)
		     || (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
			smp_found_config = 1;
#endif
			mpf_found = mpf;

			pr_info("found SMP MP-table at [mem %#010llx-%#010llx] mapped at [%p]\n",
				(unsigned long long) virt_to_phys(mpf),
				(unsigned long long) virt_to_phys(mpf) +
				sizeof(*mpf) - 1, mpf);

			mem = virt_to_phys(mpf);
			memblock_reserve(mem, sizeof(*mpf));
			if (mpf->physptr)
				smp_reserve_memory(mpf);

			return 1;
		}
		bp += 4;
		length -= 16;
	}
	return 0;
}
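
/*
 * Illustrative note (an addition, not part of the original file): the MP
 * floating pointer structure is exactly 16 bytes (enforced by the
 * BUILD_BUG_ON above) and the spec aligns it on a 16-byte boundary, so the
 * scan advances in paragraph-sized steps: bp is an unsigned int pointer,
 * hence bp += 4 moves 4 * sizeof(unsigned int) = 16 bytes, matching the
 * length -= 16 bookkeeping.
 */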

void __init default_find_smp_config(void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0, 0x400) ||
	    smp_scan_config(639 * 0x400, 0x400) ||
	    smp_scan_config(0xF0000, 0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA bus machine with an
	 * extended bios data area.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

#ifdef CONFIG_X86_IO_APIC
static u8 __initdata irq_used[MAX_IRQ_SOURCES];

static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
{
	int i;

	if (m->irqtype != mp_INT)
		return 0;

	if (m->irqflag != 0x0f)
		return 0;

	/* not legacy */

	for (i = 0; i < mp_irq_entries; i++) {
		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != 0x0f)
			continue;

		if (mp_irqs[i].srcbus != m->srcbus)
			continue;
		if (mp_irqs[i].srcbusirq != m->srcbusirq)
			continue;
		if (irq_used[i]) {
			/* already claimed */
			return -2;
		}
		irq_used[i] = 1;
		return i;
	}

	/* not found */
	return -1;
}
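
/*
 * Illustrative note (an addition, not part of the original file): irqflag
 * 0x0f is the "level triggered, active low" encoding carried by the mp_irqs[]
 * entries this pass wants to substitute (see the MP_IRQ_TRIGGER_LEVEL |
 * MP_IRQ_POLARITY_LOW comment near the end of this file).  The return value
 * convention is: 0 for a legacy entry to leave alone, -1 when no mp_irqs[]
 * match exists, -2 when the match was already claimed, and otherwise the
 * index of the entry that check_irq_src() should copy over the old one.
 */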

#define SPARE_SLOT_NUM 20

static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];

static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
	int i;

	apic_printk(APIC_VERBOSE, "OLD ");
	print_mp_irq_info(m);

	i = get_MP_intsrc_index(m);
	if (i > 0) {
		memcpy(m, &mp_irqs[i], sizeof(*m));
		apic_printk(APIC_VERBOSE, "NEW ");
		print_mp_irq_info(&mp_irqs[i]);
		return;
	}
	if (!i) {
		/* legacy, do nothing */
		return;
	}
	if (*nr_m_spare < SPARE_SLOT_NUM) {
		/*
		 * not found (-1), or duplicated (-2) are invalid entries,
		 * we need to use the slot later
		 */
		m_spare[*nr_m_spare] = m;
		*nr_m_spare += 1;
	}
}

static int __init
check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
{
	if (!mpc_new_phys || count <= mpc_new_length) {
		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
		return -1;
	}

	return 0;
}
#else /* CONFIG_X86_IO_APIC */
static
inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */

static int __init replace_intsrc_all(struct mpc_table *mpc,
					unsigned long mpc_new_phys,
					unsigned long mpc_new_length)
{
#ifdef CONFIG_X86_IO_APIC
	int i;
#endif
	int count = sizeof(*mpc);
	int nr_m_spare = 0;
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	pr_info("mpc_length %x\n", mpc->length);
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			goto out;
		}
	}

#ifdef CONFIG_X86_IO_APIC
	for (i = 0; i < mp_irq_entries; i++) {
		if (irq_used[i])
			continue;

		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != 0x0f)
			continue;

		if (nr_m_spare > 0) {
			apic_printk(APIC_VERBOSE, "*NEW* found\n");
			nr_m_spare--;
			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
			m_spare[nr_m_spare] = NULL;
		} else {
			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
			count += sizeof(struct mpc_intsrc);
			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
				goto out;
			memcpy(m, &mp_irqs[i], sizeof(*m));
			mpc->length = count;
			mpt += sizeof(struct mpc_intsrc);
		}
		print_mp_irq_info(&mp_irqs[i]);
	}
#endif
out:
	/* update checksum */
	mpc->checksum = 0;
	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);

	return 0;
}

int enable_update_mptable;

static int __init update_mptable_setup(char *str)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	return 0;
}
early_param("update_mptable", update_mptable_setup);

static unsigned long __initdata mpc_new_phys;
static unsigned long mpc_new_length __initdata = 4096;

/* alloc_mptable or alloc_mptable=4k */
static int __initdata alloc_mptable;
static int __init parse_alloc_mptable_opt(char *p)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	alloc_mptable = 1;
	if (!p)
		return 0;
	mpc_new_length = memparse(p, &p);
	return 0;
}
early_param("alloc_mptable", parse_alloc_mptable_opt);
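
/*
 * Illustrative note (an addition, not part of the original file): both
 * options are early kernel parameters, so a boot command line such as
 *
 *	update_mptable alloc_mptable=4k
 *
 * enables MP-table rewriting and reserves a 4 KiB replacement buffer (the
 * size is parsed by memparse(), so "4k"-style suffixes work).  With
 * CONFIG_PCI both handlers also force pci_routeirq, as shown above.
 */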

void __init early_reserve_e820_mpc_new(void)
{
	if (enable_update_mptable && alloc_mptable)
		mpc_new_phys = early_reserve_e820(mpc_new_length, 4);
}

static int __init update_mp_table(void)
{
	char str[16];
	char oem[10];
	struct mpf_intel *mpf;
	struct mpc_table *mpc, *mpc_new;

	if (!enable_update_mptable)
		return 0;

	mpf = mpf_found;
	if (!mpf)
		return 0;

	/*
	 * Now see if we need to go further.
	 */
	if (mpf->feature1 != 0)
		return 0;

	if (!mpf->physptr)
		return 0;

	mpc = phys_to_virt(mpf->physptr);

	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	pr_info("mpf: %llx\n", (u64)virt_to_phys(mpf));
	pr_info("physptr: %x\n", mpf->physptr);

	if (mpc_new_phys && mpc->length > mpc_new_length) {
		mpc_new_phys = 0;
		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
			mpc_new_length);
	}

	if (!mpc_new_phys) {
		unsigned char old, new;

		/* check if we can change the position */
		mpc->checksum = 0;
		old = mpf_checksum((unsigned char *)mpc, mpc->length);
		mpc->checksum = 0xff;
		new = mpf_checksum((unsigned char *)mpc, mpc->length);
		if (old == new) {
			pr_info("mpc is readonly, please try alloc_mptable instead\n");
			return 0;
		}
		pr_info("use in-position replacing\n");
	} else {
		mpf->physptr = mpc_new_phys;
		mpc_new = phys_to_virt(mpc_new_phys);
		memcpy(mpc_new, mpc, mpc->length);
		mpc = mpc_new;
		/* check if we can modify that */
		if (mpc_new_phys - mpf->physptr) {
			struct mpf_intel *mpf_new;
			/* steal 16 bytes from [0, 1k) */
			pr_info("mpf new: %x\n", 0x400 - 16);
			mpf_new = phys_to_virt(0x400 - 16);
			memcpy(mpf_new, mpf, 16);
			mpf = mpf_new;
			mpf->physptr = mpc_new_phys;
		}
		mpf->checksum = 0;
		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
		pr_info("physptr new: %x\n", mpf->physptr);
	}

	/*
	 * only replace the one with mp_INT and
	 *	 MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,
	 * already in mp_irqs , stored by ... and mp_config_acpi_gsi,
	 * may need pci=routeirq for all coverage
	 */
	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);

	return 0;
}
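
/*
 * Illustrative note (an addition, not part of the original file): the
 * old/new probe in update_mp_table() detects a read-only table by writing
 * two different values (0 and 0xff) into mpc->checksum and summing the
 * table after each write; if both sums are identical, the stores never
 * reached memory, so the table lives in ROM and can only be replaced via
 * the alloc_mptable buffer.
 */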

late_initcall(update_mp_table);