amd64: Copy MPTable PCI interrupt routing code from i386
[dragonfly.git] sys/platform/pc64/amd64/mp_machdep.c
1 /*
2 * Copyright (c) 1996, by Steve Passe
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
25 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
26 * $DragonFly: src/sys/platform/pc32/i386/mp_machdep.c,v 1.60 2008/06/07 12:03:52 mneumann Exp $
29 #include "opt_cpu.h"
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/sysctl.h>
35 #include <sys/malloc.h>
36 #include <sys/memrange.h>
37 #include <sys/cons.h> /* cngetc() */
38 #include <sys/machintr.h>
40 #include <vm/vm.h>
41 #include <vm/vm_param.h>
42 #include <vm/pmap.h>
43 #include <vm/vm_kern.h>
44 #include <vm/vm_extern.h>
45 #include <sys/lock.h>
46 #include <vm/vm_map.h>
47 #include <sys/user.h>
48 #ifdef GPROF
49 #include <sys/gmon.h>
50 #endif
52 #include <machine/smp.h>
53 #include <machine_base/apic/apicreg.h>
54 #include <machine/atomic.h>
55 #include <machine/cpufunc.h>
56 #include <machine_base/apic/mpapic.h>
57 #include <machine/psl.h>
58 #include <machine/segments.h>
59 #include <machine/tss.h>
60 #include <machine/specialreg.h>
61 #include <machine/globaldata.h>
63 #include <machine/md_var.h> /* setidt() */
64 #include <machine_base/icu/icu.h> /* IPIs */
65 #include <machine_base/isa/intr_machdep.h> /* IPIs */
67 #define FIXUP_EXTRA_APIC_INTS 8 /* additional entries we may create */
69 #define WARMBOOT_TARGET 0
70 #define WARMBOOT_OFF (KERNBASE + 0x0467)
71 #define WARMBOOT_SEG (KERNBASE + 0x0469)
73 #define BIOS_BASE (0xf0000)
74 #define BIOS_SIZE (0x10000)
75 #define BIOS_COUNT (BIOS_SIZE/4)
77 #define CMOS_REG (0x70)
78 #define CMOS_DATA (0x71)
79 #define BIOS_RESET (0x0f)
80 #define BIOS_WARM (0x0a)
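/*
 * Warm-boot mechanics, roughly: writing BIOS_WARM (0x0a) into the CMOS
 * shutdown status byte (BIOS_RESET, 0x0f) tells the BIOS to skip POST on
 * the next reset and jump through the real-mode vector stored at 0x40:0x67
 * (WARMBOOT_OFF/WARMBOOT_SEG above).  start_all_aps() points that vector at
 * the AP trampoline before starting the APs and restores the saved vector
 * and CMOS value afterwards.
 */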
82 #define PROCENTRY_FLAG_EN 0x01
83 #define PROCENTRY_FLAG_BP 0x02
84 #define IOAPICENTRY_FLAG_EN 0x01
87 /* MP Floating Pointer Structure */
88 typedef struct MPFPS {
89 char signature[4];
90 u_int32_t pap;
91 u_char length;
92 u_char spec_rev;
93 u_char checksum;
94 u_char mpfb1;
95 u_char mpfb2;
96 u_char mpfb3;
97 u_char mpfb4;
98 u_char mpfb5;
99 } *mpfps_t;
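/*
 * Field usage notes: 'pap' is the physical address of the MP Configuration
 * Table header (0 if none).  'mpfb1' is MP feature byte 1; a non-zero value
 * selects one of the spec's default configurations instead of a table walk
 * (see mptable_pass1()/mptable_pass2()).  Bit 7 of 'mpfb2' indicates an
 * IMCR is present, i.e. PIC mode rather than virtual-wire mode.
 */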
101 /* MP Configuration Table Header */
102 typedef struct MPCTH {
103 char signature[4];
104 u_short base_table_length;
105 u_char spec_rev;
106 u_char checksum;
107 u_char oem_id[8];
108 u_char product_id[12];
109 u_int32_t oem_table_pointer;
110 u_short oem_table_size;
111 u_short entry_count;
112 u_int32_t apic_address;
113 u_short extended_table_length;
114 u_char extended_table_checksum;
115 u_char reserved;
116 } *mpcth_t;
119 typedef struct PROCENTRY {
120 u_char type;
121 u_char apic_id;
122 u_char apic_version;
123 u_char cpu_flags;
124 u_int32_t cpu_signature;
125 u_int32_t feature_flags;
126 u_int32_t reserved1;
127 u_int32_t reserved2;
128 } *proc_entry_ptr;
130 typedef struct BUSENTRY {
131 u_char type;
132 u_char bus_id;
133 char bus_type[6];
134 } *bus_entry_ptr;
136 typedef struct IOAPICENTRY {
137 u_char type;
138 u_char apic_id;
139 u_char apic_version;
140 u_char apic_flags;
141 u_int32_t apic_address;
142 } *io_apic_entry_ptr;
144 typedef struct INTENTRY {
145 u_char type;
146 u_char int_type;
147 u_short int_flags;
148 u_char src_bus_id;
149 u_char src_bus_irq;
150 u_char dst_apic_id;
151 u_char dst_apic_int;
152 } *int_entry_ptr;
154 /* descriptions of MP basetable entries */
155 typedef struct BASETABLE_ENTRY {
156 u_char type;
157 u_char length;
158 char name[16];
159 } basetable_entry;
162 * this code MUST be enabled here and in mpboot.s.
163 * it follows the very early stages of AP boot by placing values in CMOS ram.
164 * it NORMALLY will never be needed and thus the primitive method for enabling.
167 #if defined(CHECK_POINTS)
168 #define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA))
169 #define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
171 #define CHECK_INIT(D); \
172 CHECK_WRITE(0x34, (D)); \
173 CHECK_WRITE(0x35, (D)); \
174 CHECK_WRITE(0x36, (D)); \
175 CHECK_WRITE(0x37, (D)); \
176 CHECK_WRITE(0x38, (D)); \
177 CHECK_WRITE(0x39, (D));
179 #define CHECK_PRINT(S); \
180 kprintf("%s: %d, %d, %d, %d, %d, %d\n", \
181 (S), \
182 CHECK_READ(0x34), \
183 CHECK_READ(0x35), \
184 CHECK_READ(0x36), \
185 CHECK_READ(0x37), \
186 CHECK_READ(0x38), \
187 CHECK_READ(0x39));
189 #else /* CHECK_POINTS */
191 #define CHECK_INIT(D)
192 #define CHECK_PRINT(S)
194 #endif /* CHECK_POINTS */
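/*
 * With CHECK_POINTS enabled, CHECK_INIT() seeds CMOS bytes 0x34-0x39 with a
 * known value before an AP is started, the trampoline code in mpboot.s
 * overwrites them as it progresses, and CHECK_PRINT() reads them back on
 * the BSP so a hung AP shows roughly how far it got (see start_all_aps()).
 */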
197 * Values to send to the POST hardware.
199 #define MP_BOOTADDRESS_POST 0x10
200 #define MP_PROBE_POST 0x11
201 #define MPTABLE_PASS1_POST 0x12
203 #define MP_START_POST 0x13
204 #define MP_ENABLE_POST 0x14
205 #define MPTABLE_PASS2_POST 0x15
207 #define START_ALL_APS_POST 0x16
208 #define INSTALL_AP_TRAMP_POST 0x17
209 #define START_AP_POST 0x18
211 #define MP_ANNOUNCE_POST 0x19
213 static int need_hyperthreading_fixup;
214 static u_int logical_cpus;
215 u_int logical_cpus_mask;
217 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
218 int current_postcode;
220 /** XXX FIXME: what system files declare these??? */
221 extern struct region_descriptor r_gdt, r_idt;
223 int bsp_apic_ready = 0; /* flags usability of BSP apic */
224 int mp_naps; /* # of Application processors */
225 int mp_nbusses; /* # of busses */
226 #ifdef APIC_IO
227 int mp_napics; /* # of IO APICs */
228 #endif
229 int boot_cpu_id; /* designated BSP */
230 vm_offset_t cpu_apic_address;
231 #ifdef APIC_IO
232 vm_offset_t io_apic_address[NAPICID]; /* NAPICID is more than enough */
233 u_int32_t *io_apic_versions;
234 #endif
235 extern int nkpt;
237 u_int32_t cpu_apic_versions[MAXCPU];
238 int64_t tsc0_offset;
239 extern int64_t tsc_offsets[];
241 #ifdef APIC_IO
242 struct apic_intmapinfo int_to_apicintpin[APIC_INTMAPSIZE];
243 #endif
246 * APIC ID logical/physical mapping structures.
247 * We oversize these to simplify boot-time config.
249 int cpu_num_to_apic_id[NAPICID];
250 #ifdef APIC_IO
251 int io_num_to_apic_id[NAPICID];
252 #endif
253 int apic_id_to_logical[NAPICID];
255 /* AP uses this during bootstrap. Do not staticize. */
256 char *bootSTK;
257 static int bootAP;
259 /* Hotwire a 0->4MB V==P mapping */
260 extern pt_entry_t *KPTphys;
263 * SMP page table page. Setup by locore to point to a page table
264 * page from which we allocate per-cpu privatespace areas, io_apics,
265 * and so forth.
268 #define IO_MAPPING_START_INDEX \
269 (SMP_MAXCPU * sizeof(struct privatespace) / PAGE_SIZE)
271 extern pt_entry_t *SMPpt;
272 static int SMPpt_alloc_index = IO_MAPPING_START_INDEX;
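/*
 * SMPpt layout: the first IO_MAPPING_START_INDEX ptes are reserved for the
 * per-cpu privatespace areas of up to SMP_MAXCPU cpus; SMPpt_alloc_index
 * hands out the remaining slots upward for I/O mappings such as the IO APIC
 * registers.  mp_probe() asserts that the start index leaves spare entries.
 */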
274 struct pcb stoppcbs[MAXCPU];
276 extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
278 extern void initializecpu(void);
281 * Local data and functions.
284 static int mp_capable;
285 static u_int boot_address;
286 static u_int base_memory;
287 static int mp_finish;
289 static mpfps_t mpfps;
290 static long search_for_sig(u_int32_t target, int count);
291 static void mp_enable(u_int boot_addr);
293 static void mptable_hyperthread_fixup(u_int id_mask);
294 static void mptable_pass1(void);
295 static int mptable_pass2(void);
296 static void default_mp_table(int type);
297 static void fix_mp_table(void);
298 #ifdef APIC_IO
299 static void setup_apic_irq_mapping(void);
300 static int apic_int_is_bus_type(int intr, int bus_type);
301 #endif
302 static int start_all_aps(u_int boot_addr);
303 static void install_ap_tramp(u_int boot_addr);
304 static int start_ap(struct mdglobaldata *gd, u_int boot_addr);
306 static cpumask_t smp_startup_mask = 1; /* which cpus have been started */
307 cpumask_t smp_active_mask = 1; /* which cpus are ready for IPIs etc? */
308 SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RD, &smp_active_mask, 0, "");
309 static u_int bootMP_size;
312 * Calculate usable address in base memory for AP trampoline code.
314 u_int
315 mp_bootaddress(u_int basemem)
317 POSTCODE(MP_BOOTADDRESS_POST);
319 base_memory = basemem;
321 bootMP_size = mptramp_end - mptramp_start;
322 boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
323 if (((basemem * 1024) - boot_address) < bootMP_size)
324 boot_address -= PAGE_SIZE; /* not enough, lower by 4k */
325 /* 3 levels of page table pages */
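/*
 * Layout at the top of base memory: the trampoline code occupies the
 * highest page with enough room below 'basemem', and the three page table
 * pages it uses (PML4, PDP, PD -- filled in by start_all_aps()) sit
 * immediately below it at mptramp_pagetables.
 */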
326 mptramp_pagetables = boot_address - (PAGE_SIZE * 3);
328 return mptramp_pagetables;
333 * Look for an Intel MP spec table (ie, SMP capable hardware).
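 *
 * The "_MP_" floating pointer is searched for in the order suggested by the
 * MP spec: the first 1K of the EBDA (segment taken from 0x40:0x0e), or the
 * last 1K of base memory when no EBDA exists, and finally the 64K BIOS ROM
 * region at 0xf0000-0xfffff.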
336 mp_probe(void)
338 long x;
339 u_long segment;
340 u_int32_t target;
343 * Make sure our SMPpt[] page table is big enough to hold all the
344 * mappings we need.
346 KKASSERT(IO_MAPPING_START_INDEX < NPTEPG - 2);
348 POSTCODE(MP_PROBE_POST);
350 /* see if EBDA exists */
351 if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
352 /* search first 1K of EBDA */
353 target = (u_int32_t) (segment << 4);
354 if ((x = search_for_sig(target, 1024 / 4)) != -1L)
355 goto found;
356 } else {
357 /* last 1K of base memory, effective 'top of base' passed in */
358 target = (u_int32_t) (base_memory - 0x400);
359 if ((x = search_for_sig(target, 1024 / 4)) != -1L)
360 goto found;
363 /* search the BIOS */
364 target = (u_int32_t) BIOS_BASE;
365 if ((x = search_for_sig(target, BIOS_COUNT)) != -1L)
366 goto found;
368 /* nothing found */
369 mpfps = (mpfps_t)0;
370 mp_capable = 0;
371 return 0;
373 found:
375 * Calculate needed resources. We can safely map physical
376 * memory into SMPpt after mptable_pass1() completes.
378 mpfps = (mpfps_t)x;
379 mptable_pass1();
381 /* flag fact that we are running multiple processors */
382 mp_capable = 1;
383 return 1;
388 * Startup the SMP processors.
390 void
391 mp_start(void)
393 POSTCODE(MP_START_POST);
395 /* look for MP capable motherboard */
396 if (mp_capable)
397 mp_enable(boot_address);
398 else
399 panic("MP hardware not found!");
404 * Print various information about the SMP system hardware and setup.
406 void
407 mp_announce(void)
409 int x;
411 POSTCODE(MP_ANNOUNCE_POST);
413 kprintf("DragonFly/MP: Multiprocessor motherboard\n");
414 kprintf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
415 kprintf(", version: 0x%08x", cpu_apic_versions[0]);
416 kprintf(", at 0x%08x\n", cpu_apic_address);
417 for (x = 1; x <= mp_naps; ++x) {
418 kprintf(" cpu%d (AP): apic id: %2d", x, CPU_TO_ID(x));
419 kprintf(", version: 0x%08x", cpu_apic_versions[x]);
420 kprintf(", at 0x%08x\n", cpu_apic_address);
423 #if defined(APIC_IO)
424 for (x = 0; x < mp_napics; ++x) {
425 kprintf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
426 kprintf(", version: 0x%08x", io_apic_versions[x]);
427 kprintf(", at 0x%08x\n", io_apic_address[x]);
429 #else
430 kprintf(" Warning: APIC I/O disabled\n");
431 #endif /* APIC_IO */
435 * AP cpus call this to sync up protected mode.
437 * WARNING! %gs is not set up on entry. This routine sets up %gs.
439 void
440 init_secondary(void)
442 int gsel_tss;
443 int x, myid = bootAP;
444 u_int64_t msr, cr0;
445 struct mdglobaldata *md;
446 struct privatespace *ps;
448 ps = &CPU_prvspace[myid];
450 gdt_segs[GPROC0_SEL].ssd_base =
451 (long) &ps->mdglobaldata.gd_common_tss;
452 ps->mdglobaldata.mi.gd_prvspace = ps;
454 /* We fill the 32-bit segment descriptors */
455 for (x = 0; x < NGDT; x++) {
456 if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
457 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]);
459 /* And now a 64-bit one */
460 ssdtosyssd(&gdt_segs[GPROC0_SEL],
461 (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]);
463 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
464 r_gdt.rd_base = (long) &gdt[myid * NGDT];
465 lgdt(&r_gdt); /* does magic intra-segment return */
467 /* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */
468 wrmsr(MSR_FSBASE, 0); /* User value */
469 wrmsr(MSR_GSBASE, (u_int64_t)ps);
470 wrmsr(MSR_KGSBASE, 0); /* XXX User value while we're in the kernel */
472 lidt(&r_idt);
474 #if 0
475 lldt(_default_ldt);
476 mdcpu->gd_currentldt = _default_ldt;
477 #endif
479 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
480 gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS;
482 md = mdcpu; /* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/
484 md->gd_common_tss.tss_rsp0 = 0; /* not used until after switch */
485 #if 0 /* JG XXX */
486 md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;
487 #endif
488 md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL];
489 md->gd_common_tssd = *md->gd_tss_gdt;
490 #if 0 /* JG XXX */
491 md->gd_common_tss.tss_ist1 = (long)&doublefault_stack[PAGE_SIZE];
492 #endif
493 ltr(gsel_tss);
496 * Set to a known state:
497 * Set by mpboot.s: CR0_PG, CR0_PE
498 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
500 cr0 = rcr0();
501 cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
502 load_cr0(cr0);
504 /* Set up the fast syscall stuff */
505 msr = rdmsr(MSR_EFER) | EFER_SCE;
506 wrmsr(MSR_EFER, msr);
507 wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
508 wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
509 msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
510 ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
511 wrmsr(MSR_STAR, msr);
512 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
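/*
 * Fast syscall MSRs in brief: LSTAR is the 64-bit SYSCALL entry point and
 * CSTAR the 32-bit compat entry point; STAR bits 47:32 give the kernel
 * CS/SS selector base used on SYSCALL and bits 63:48 the user selector
 * base used on SYSRET; SF_MASK lists the rflags bits cleared on entry
 * (interrupts, trap and direction flags, etc).
 */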
514 pmap_set_opt(); /* PSE/4MB pages, etc */
515 #if JGXXX
516 /* Initialize the PAT MSR. */
517 pmap_init_pat();
518 #endif
520 /* set up CPU registers and state */
521 cpu_setregs();
523 /* set up SSE/NX registers */
524 initializecpu();
526 /* set up FPU state on the AP */
527 npxinit(__INITIAL_NPXCW__);
529 /* disable the APIC, just to be SURE */
530 lapic->svr &= ~APIC_SVR_ENABLE;
532 /* data returned to BSP */
533 cpu_apic_versions[0] = lapic->version;
536 /*******************************************************************
537 * local functions and data
541 * start the SMP system
543 static void
544 mp_enable(u_int boot_addr)
546 int x;
547 #if defined(APIC_IO)
548 int apic;
549 u_int ux;
550 #endif /* APIC_IO */
552 POSTCODE(MP_ENABLE_POST);
554 #if 0 /* JGXXX */
555 /* turn on 4MB of V == P addressing so we can get to MP table */
556 *(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
557 cpu_invltlb();
558 #endif
560 /* examine the MP table for needed info, uses physical addresses */
561 x = mptable_pass2();
563 #if 0 /* JGXXX */
564 *(int *)PTD = 0;
565 cpu_invltlb();
566 #endif /* 0 JGXXX */
568 /* can't process default configs till the CPU APIC is pmapped */
569 if (x)
570 default_mp_table(x);
572 /* post scan cleanup */
573 fix_mp_table();
575 #if defined(APIC_IO)
577 setup_apic_irq_mapping();
579 /* fill the LOGICAL io_apic_versions table */
580 for (apic = 0; apic < mp_napics; ++apic) {
581 ux = io_apic_read(apic, IOAPIC_VER);
582 io_apic_versions[apic] = ux;
583 io_apic_set_id(apic, IO_TO_ID(apic));
586 /* program each IO APIC in the system */
587 for (apic = 0; apic < mp_napics; ++apic)
588 if (io_apic_setup(apic) < 0)
589 panic("IO APIC setup failure");
591 #endif /* APIC_IO */
594 * These are required for SMP operation
597 /* install a 'Spurious INTerrupt' vector */
598 setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
599 SDT_SYSIGT, SEL_KPL, 0);
601 /* install an inter-CPU IPI for TLB invalidation */
602 setidt(XINVLTLB_OFFSET, Xinvltlb,
603 SDT_SYSIGT, SEL_KPL, 0);
605 /* install an inter-CPU IPI for IPIQ messaging */
606 setidt(XIPIQ_OFFSET, Xipiq,
607 SDT_SYSIGT, SEL_KPL, 0);
609 /* install a timer vector */
610 setidt(XTIMER_OFFSET, Xtimer,
611 SDT_SYSIGT, SEL_KPL, 0);
613 /* install an inter-CPU IPI for CPU stop/restart */
614 setidt(XCPUSTOP_OFFSET, Xcpustop,
615 SDT_SYSIGT, SEL_KPL, 0);
617 /* start each Application Processor */
618 start_all_aps(boot_addr);
623 * look for the MP spec signature
626 /* string defined by the Intel MP Spec as identifying the MP table */
627 #define MP_SIG 0x5f504d5f /* _MP_ */
628 #define NEXT(X) ((X) += 4)
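/*
 * MP_SIG is the four ASCII bytes "_MP_" (0x5f 0x4d 0x50 0x5f) read as a
 * little-endian 32-bit word.  search_for_sig() indexes an array of dwords,
 * and NEXT() bumps the index by 4, so the scan actually advances in 16-byte
 * steps -- the paragraph alignment the MP spec requires of the floating
 * pointer structure.
 */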
629 static long
630 search_for_sig(u_int32_t target, int count)
632 int x;
633 u_int32_t *addr = (u_int32_t *) (KERNBASE + target);
635 for (x = 0; x < count; NEXT(x))
636 if (addr[x] == MP_SIG)
637 /* make array index a byte index */
638 return (long)(&addr[x]);
640 return -1;
644 static basetable_entry basetable_entry_types[] =
646 {0, 20, "Processor"},
647 {1, 8, "Bus"},
648 {2, 8, "I/O APIC"},
649 {3, 8, "I/O INT"},
650 {4, 8, "Local INT"}
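/*
 * The type byte at the start of each base-table entry indexes this table;
 * both mptable passes use the per-type length to step through the
 * variable-length entry list (processor entries are 20 bytes, all other
 * entry types are 8).
 */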
653 typedef struct BUSDATA {
654 u_char bus_id;
655 enum busTypes bus_type;
656 } bus_datum;
658 typedef struct INTDATA {
659 u_char int_type;
660 u_short int_flags;
661 u_char src_bus_id;
662 u_char src_bus_irq;
663 u_char dst_apic_id;
664 u_char dst_apic_int;
665 u_char int_vector;
666 } io_int, local_int;
668 typedef struct BUSTYPENAME {
669 u_char type;
670 char name[7];
671 } bus_type_name;
673 static bus_type_name bus_type_table[] =
675 {CBUS, "CBUS"},
676 {CBUSII, "CBUSII"},
677 {EISA, "EISA"},
678 {MCA, "MCA"},
679 {UNKNOWN_BUSTYPE, "---"},
680 {ISA, "ISA"},
681 {MCA, "MCA"},
682 {UNKNOWN_BUSTYPE, "---"},
683 {UNKNOWN_BUSTYPE, "---"},
684 {UNKNOWN_BUSTYPE, "---"},
685 {UNKNOWN_BUSTYPE, "---"},
686 {UNKNOWN_BUSTYPE, "---"},
687 {PCI, "PCI"},
688 {UNKNOWN_BUSTYPE, "---"},
689 {UNKNOWN_BUSTYPE, "---"},
690 {UNKNOWN_BUSTYPE, "---"},
691 {UNKNOWN_BUSTYPE, "---"},
692 {XPRESS, "XPRESS"},
693 {UNKNOWN_BUSTYPE, "---"}
695 /* from MP spec v1.4, table 5-1 */
696 static int default_data[7][5] =
698 /* nbus, id0, type0, id1, type1 */
699 {1, 0, ISA, 255, 255},
700 {1, 0, EISA, 255, 255},
701 {1, 0, EISA, 255, 255},
702 {1, 0, MCA, 255, 255},
703 {2, 0, ISA, 1, PCI},
704 {2, 0, EISA, 1, PCI},
705 {2, 0, MCA, 1, PCI}
709 /* the bus data */
710 static bus_datum *bus_data;
712 #ifdef APIC_IO
713 /* the IO INT data, one entry per possible APIC INTerrupt */
714 static io_int *io_apic_ints;
715 static int nintrs;
716 #endif
718 static int processor_entry (proc_entry_ptr entry, int cpu);
719 static int bus_entry (bus_entry_ptr entry, int bus);
720 #ifdef APIC_IO
721 static int io_apic_entry (io_apic_entry_ptr entry, int apic);
722 static int int_entry (int_entry_ptr entry, int intr);
723 #endif
724 static int lookup_bus_type (char *name);
728 * 1st pass on motherboard's Intel MP specification table.
730 * initializes:
731 * ncpus = 1
733 * determines:
734 * cpu_apic_address (common to all CPUs)
735 * io_apic_address[N]
736 * mp_naps
737 * mp_nbusses
738 * mp_napics
739 * nintrs
741 static void
742 mptable_pass1(void)
744 #ifdef APIC_IO
745 int x;
746 #endif
747 mpcth_t cth;
748 int totalSize;
749 void* position;
750 int count;
751 int type;
752 u_int id_mask;
754 POSTCODE(MPTABLE_PASS1_POST);
756 #ifdef APIC_IO
757 /* clear various tables */
758 for (x = 0; x < NAPICID; ++x) {
759 io_apic_address[x] = ~0; /* IO APIC address table */
761 #endif
763 /* init everything to empty */
764 mp_naps = 0;
765 mp_nbusses = 0;
766 #ifdef APIC_IO
767 mp_napics = 0;
768 nintrs = 0;
769 #endif
770 id_mask = 0;
772 /* check for use of 'default' configuration */
773 if (mpfps->mpfb1 != 0) {
774 /* use default addresses */
775 cpu_apic_address = DEFAULT_APIC_BASE;
776 #ifdef APIC_IO
777 io_apic_address[0] = DEFAULT_IO_APIC_BASE;
778 #endif
780 /* fill in with defaults */
781 mp_naps = 2; /* includes BSP */
782 mp_nbusses = default_data[mpfps->mpfb1 - 1][0];
783 #if defined(APIC_IO)
784 mp_napics = 1;
785 nintrs = 16;
786 #endif /* APIC_IO */
788 else {
789 if ((cth = mpfps->pap) == 0)
790 panic("MP Configuration Table Header MISSING!");
792 cpu_apic_address = (vm_offset_t) cth->apic_address;
794 /* walk the table, recording info of interest */
795 totalSize = cth->base_table_length - sizeof(struct MPCTH);
796 position = (u_char *) cth + sizeof(struct MPCTH);
797 count = cth->entry_count;
799 while (count--) {
800 switch (type = *(u_char *) position) {
801 case 0: /* processor_entry */
802 if (((proc_entry_ptr)position)->cpu_flags
803 & PROCENTRY_FLAG_EN) {
804 ++mp_naps;
805 id_mask |= 1 <<
806 ((proc_entry_ptr)position)->apic_id;
808 break;
809 case 1: /* bus_entry */
810 ++mp_nbusses;
811 break;
812 case 2: /* io_apic_entry */
813 #ifdef APIC_IO
814 if (((io_apic_entry_ptr)position)->apic_flags
815 & IOAPICENTRY_FLAG_EN)
816 io_apic_address[mp_napics++] =
817 (vm_offset_t)((io_apic_entry_ptr)
818 position)->apic_address;
819 #endif
820 break;
821 case 3: /* int_entry */
822 #ifdef APIC_IO
823 ++nintrs;
824 #endif
825 break;
826 case 4: /* int_entry */
827 break;
828 default:
829 panic("mpfps Base Table HOSED!");
830 /* NOTREACHED */
833 totalSize -= basetable_entry_types[type].length;
834 position = (uint8_t *)position +
835 basetable_entry_types[type].length;
839 /* qualify the numbers */
840 if (mp_naps > MAXCPU) {
841 kprintf("Warning: only using %d of %d available CPUs!\n",
842 MAXCPU, mp_naps);
843 mp_naps = MAXCPU;
846 /* See if we need to fixup HT logical CPUs. */
847 mptable_hyperthread_fixup(id_mask);
850 * Count the BSP.
851 * This is also used as a counter while starting the APs.
853 ncpus = 1;
855 --mp_naps; /* subtract the BSP */
860 * 2nd pass on motherboard's Intel MP specification table.
862 * sets:
863 * boot_cpu_id
864 * ID_TO_IO(N), phy APIC ID to log CPU/IO table
865 * CPU_TO_ID(N), logical CPU to APIC ID table
866 * IO_TO_ID(N), logical IO to APIC ID table
867 * bus_data[N]
868 * io_apic_ints[N]
870 static int
871 mptable_pass2(void)
873 struct PROCENTRY proc;
874 int x;
875 mpcth_t cth;
876 int totalSize;
877 void* position;
878 int count;
879 int type;
880 int apic, bus, cpu, intr;
881 int i;
883 POSTCODE(MPTABLE_PASS2_POST);
885 /* Initialize fake proc entry for use with HT fixup. */
886 bzero(&proc, sizeof(proc));
887 proc.type = 0;
888 proc.cpu_flags = PROCENTRY_FLAG_EN;
890 #ifdef APIC_IO
891 MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics,
892 M_DEVBUF, M_WAITOK);
893 MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics,
894 M_DEVBUF, M_WAITOK | M_ZERO);
895 MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + FIXUP_EXTRA_APIC_INTS),
896 M_DEVBUF, M_WAITOK);
897 #endif
898 MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses,
899 M_DEVBUF, M_WAITOK);
901 #ifdef APIC_IO
902 for (i = 0; i < mp_napics; i++) {
903 ioapic[i] = permanent_io_mapping(io_apic_address[i]);
905 #endif
907 /* clear various tables */
908 for (x = 0; x < NAPICID; ++x) {
909 CPU_TO_ID(x) = -1; /* logical CPU to APIC ID table */
910 #ifdef APIC_IO
911 ID_TO_IO(x) = -1; /* phy APIC ID to log CPU/IO table */
912 IO_TO_ID(x) = -1; /* logical IO to APIC ID table */
913 #endif
916 /* clear bus data table */
917 for (x = 0; x < mp_nbusses; ++x)
918 bus_data[x].bus_id = 0xff;
920 #ifdef APIC_IO
921 /* clear IO APIC INT table */
922 for (x = 0; x < (nintrs + 1); ++x) {
923 io_apic_ints[x].int_type = 0xff;
924 io_apic_ints[x].int_vector = 0xff;
926 #endif
928 /* setup the cpu/apic mapping arrays */
929 boot_cpu_id = -1;
931 /* record whether PIC or virtual-wire mode */
932 machintr_setvar_simple(MACHINTR_VAR_IMCR_PRESENT, mpfps->mpfb2 & 0x80);
934 /* check for use of 'default' configuration */
935 if (mpfps->mpfb1 != 0)
936 return mpfps->mpfb1; /* return default configuration type */
938 if ((cth = mpfps->pap) == 0)
939 panic("MP Configuration Table Header MISSING!");
941 cth = PHYS_TO_DMAP(mpfps->pap);
942 /* walk the table, recording info of interest */
943 totalSize = cth->base_table_length - sizeof(struct MPCTH);
944 position = (u_char *) cth + sizeof(struct MPCTH);
945 count = cth->entry_count;
946 apic = bus = intr = 0;
947 cpu = 1; /* pre-count the BSP */
949 while (count--) {
950 switch (type = *(u_char *) position) {
951 case 0:
952 if (processor_entry(position, cpu))
953 ++cpu;
955 if (need_hyperthreading_fixup) {
957 * Create fake mptable processor entries
958 * and feed them to processor_entry() to
959 * enumerate the logical CPUs.
961 proc.apic_id = ((proc_entry_ptr)position)->apic_id;
962 for (i = 1; i < logical_cpus; i++) {
963 proc.apic_id++;
964 processor_entry(&proc, cpu);
965 logical_cpus_mask |= (1 << cpu);
966 cpu++;
969 break;
970 case 1:
971 if (bus_entry(position, bus))
972 ++bus;
973 break;
974 case 2:
975 #ifdef APIC_IO
976 if (io_apic_entry(position, apic))
977 ++apic;
978 #endif
979 break;
980 case 3:
981 #ifdef APIC_IO
982 if (int_entry(position, intr))
983 ++intr;
984 #endif
985 break;
986 case 4:
987 /* int_entry(position); */
988 break;
989 default:
990 panic("mpfps Base Table HOSED!");
991 /* NOTREACHED */
994 totalSize -= basetable_entry_types[type].length;
995 position = (uint8_t *)position + basetable_entry_types[type].length;
998 if (boot_cpu_id == -1)
999 panic("NO BSP found!");
1001 /* report fact that it's NOT a default configuration */
1002 return 0;
1006 * Check if we should perform a hyperthreading "fix-up" to
1007 * enumerate any logical CPU's that aren't already listed
1008 * in the table.
1010 * XXX: We assume that all of the physical CPUs in the
1011 * system have the same number of logical CPUs.
1013 * XXX: We assume that APIC ID's are allocated such that
1014 * the APIC ID's for a physical processor are aligned
1015 * with the number of logical CPU's in the processor.
1017 static void
1018 mptable_hyperthread_fixup(u_int id_mask)
1020 u_int i, id;
1022 /* Nothing to do if there is no HTT support. */
1023 if ((cpu_feature & CPUID_HTT) == 0)
1024 return;
1025 logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
1026 if (logical_cpus <= 1)
1027 return;
1030 * For each APIC ID of a CPU that is set in the mask,
1031 * scan the other candidate APIC ID's for this
1032 * physical processor. If any of those ID's are
1033 * already in the table, then kill the fixup.
1035 for (id = 0; id <= MAXCPU; id++) {
1036 if ((id_mask & 1 << id) == 0)
1037 continue;
1038 /* First, make sure we are on a logical_cpus boundary. */
1039 if (id % logical_cpus != 0)
1040 return;
1041 for (i = id + 1; i < id + logical_cpus; i++)
1042 if ((id_mask & 1 << i) != 0)
1043 return;
1047 * Ok, the ID's checked out, so enable the fixup. We have to fixup
1048 * mp_naps right now.
1050 need_hyperthreading_fixup = 1;
1051 mp_naps *= logical_cpus;
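/*
 * logical_cpus is taken from CPUID leaf 1: EBX bits 23:16 (CPUID_HTT_CORES)
 * give the logical processor count per package when the HTT feature bit is
 * set.  With the fixup enabled, pass 2 synthesizes the missing APIC IDs by
 * incrementing from each physical CPU's ID, which is why the IDs checked
 * above must sit on a logical_cpus boundary.
 */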
1054 #ifdef APIC_IO
1056 void
1057 assign_apic_irq(int apic, int intpin, int irq)
1059 int x;
1061 if (int_to_apicintpin[irq].ioapic != -1)
1062 panic("assign_apic_irq: inconsistent table");
1064 int_to_apicintpin[irq].ioapic = apic;
1065 int_to_apicintpin[irq].int_pin = intpin;
1066 int_to_apicintpin[irq].apic_address = ioapic[apic];
1067 int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;
1069 for (x = 0; x < nintrs; x++) {
1070 if ((io_apic_ints[x].int_type == 0 ||
1071 io_apic_ints[x].int_type == 3) &&
1072 io_apic_ints[x].int_vector == 0xff &&
1073 io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
1074 io_apic_ints[x].dst_apic_int == intpin)
1075 io_apic_ints[x].int_vector = irq;
1079 void
1080 revoke_apic_irq(int irq)
1082 int x;
1083 int oldapic;
1084 int oldintpin;
1086 if (int_to_apicintpin[irq].ioapic == -1)
1087 panic("revoke_apic_irq: inconsistent table");
1089 oldapic = int_to_apicintpin[irq].ioapic;
1090 oldintpin = int_to_apicintpin[irq].int_pin;
1092 int_to_apicintpin[irq].ioapic = -1;
1093 int_to_apicintpin[irq].int_pin = 0;
1094 int_to_apicintpin[irq].apic_address = NULL;
1095 int_to_apicintpin[irq].redirindex = 0;
1097 for (x = 0; x < nintrs; x++) {
1098 if ((io_apic_ints[x].int_type == 0 ||
1099 io_apic_ints[x].int_type == 3) &&
1100 io_apic_ints[x].int_vector != 0xff &&
1101 io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
1102 io_apic_ints[x].dst_apic_int == oldintpin)
1103 io_apic_ints[x].int_vector = 0xff;
1108 * Allocate an IRQ
1110 static void
1111 allocate_apic_irq(int intr)
1113 int apic;
1114 int intpin;
1115 int irq;
1117 if (io_apic_ints[intr].int_vector != 0xff)
1118 return; /* Interrupt handler already assigned */
1120 if (io_apic_ints[intr].int_type != 0 &&
1121 (io_apic_ints[intr].int_type != 3 ||
1122 (io_apic_ints[intr].dst_apic_id == IO_TO_ID(0) &&
1123 io_apic_ints[intr].dst_apic_int == 0)))
1124 return; /* Not INT or ExtInt on != (0, 0) */
1126 irq = 0;
1127 while (irq < APIC_INTMAPSIZE &&
1128 int_to_apicintpin[irq].ioapic != -1)
1129 irq++;
1131 if (irq >= APIC_INTMAPSIZE)
1132 return; /* No free interrupt handlers */
1134 apic = ID_TO_IO(io_apic_ints[intr].dst_apic_id);
1135 intpin = io_apic_ints[intr].dst_apic_int;
1137 assign_apic_irq(apic, intpin, irq);
1141 static void
1142 swap_apic_id(int apic, int oldid, int newid)
1144 int x;
1145 int oapic;
1148 if (oldid == newid)
1149 return; /* Nothing to do */
1151 kprintf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n",
1152 apic, oldid, newid);
1154 /* Swap physical APIC IDs in interrupt entries */
1155 for (x = 0; x < nintrs; x++) {
1156 if (io_apic_ints[x].dst_apic_id == oldid)
1157 io_apic_ints[x].dst_apic_id = newid;
1158 else if (io_apic_ints[x].dst_apic_id == newid)
1159 io_apic_ints[x].dst_apic_id = oldid;
1162 /* Swap physical APIC IDs in IO_TO_ID mappings */
1163 for (oapic = 0; oapic < mp_napics; oapic++)
1164 if (IO_TO_ID(oapic) == newid)
1165 break;
1167 if (oapic < mp_napics) {
1168 kprintf("Changing APIC ID for IO APIC #%d from "
1169 "%d to %d in MP table\n",
1170 oapic, newid, oldid);
1171 IO_TO_ID(oapic) = oldid;
1173 IO_TO_ID(apic) = newid;
1177 static void
1178 fix_id_to_io_mapping(void)
1180 int x;
1182 for (x = 0; x < NAPICID; x++)
1183 ID_TO_IO(x) = -1;
1185 for (x = 0; x <= mp_naps; x++)
1186 if (CPU_TO_ID(x) < NAPICID)
1187 ID_TO_IO(CPU_TO_ID(x)) = x;
1189 for (x = 0; x < mp_napics; x++)
1190 if (IO_TO_ID(x) < NAPICID)
1191 ID_TO_IO(IO_TO_ID(x)) = x;
1195 static int
1196 first_free_apic_id(void)
1198 int freeid, x;
1200 for (freeid = 0; freeid < NAPICID; freeid++) {
1201 for (x = 0; x <= mp_naps; x++)
1202 if (CPU_TO_ID(x) == freeid)
1203 break;
1204 if (x <= mp_naps)
1205 continue;
1206 for (x = 0; x < mp_napics; x++)
1207 if (IO_TO_ID(x) == freeid)
1208 break;
1209 if (x < mp_napics)
1210 continue;
1211 return freeid;
1213 return freeid;
1217 static int
1218 io_apic_id_acceptable(int apic, int id)
1220 int cpu; /* Logical CPU number */
1221 int oapic; /* Logical IO APIC number for other IO APIC */
1223 if (id >= NAPICID)
1224 return 0; /* Out of range */
1226 for (cpu = 0; cpu <= mp_naps; cpu++)
1227 if (CPU_TO_ID(cpu) == id)
1228 return 0; /* Conflict with CPU */
1230 for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++)
1231 if (IO_TO_ID(oapic) == id)
1232 return 0; /* Conflict with other APIC */
1234 return 1; /* ID is acceptable for IO APIC */
1237 static
1238 io_int *
1239 io_apic_find_int_entry(int apic, int pin)
1241 int x;
1243 /* search each of the possible INTerrupt sources */
1244 for (x = 0; x < nintrs; ++x) {
1245 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1246 (pin == io_apic_ints[x].dst_apic_int))
1247 return (&io_apic_ints[x]);
1249 return NULL;
1252 #endif
1255 * parse an Intel MP specification table
1257 static void
1258 fix_mp_table(void)
1260 int x;
1261 #ifdef APIC_IO
1262 int id;
1263 int apic; /* IO APIC unit number */
1264 int freeid; /* Free physical APIC ID */
1265 int physid; /* Current physical IO APIC ID */
1266 io_int *io14;
1267 #endif
1268 int bus_0 = 0; /* Stop GCC warning */
1269 int bus_pci = 0; /* Stop GCC warning */
1270 int num_pci_bus;
1273 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
1274 * did it wrong. The MP spec says that when more than 1 PCI bus
1275 * exists the BIOS must begin with bus entries for the PCI bus and use
1276 * actual PCI bus numbering. This implies that when only 1 PCI bus
1277 * exists the BIOS can choose to ignore this ordering, and indeed many
1278 * MP motherboards do ignore it. This causes a problem when the PCI
1279 * sub-system makes requests of the MP sub-system based on PCI bus
1280 * numbers. So here we look for the situation and renumber the
1281 * busses and associated INTs in an effort to "make it right".
1284 /* find bus 0, PCI bus, count the number of PCI busses */
1285 for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
1286 if (bus_data[x].bus_id == 0) {
1287 bus_0 = x;
1289 if (bus_data[x].bus_type == PCI) {
1290 ++num_pci_bus;
1291 bus_pci = x;
1295 * bus_0 == slot of bus with ID of 0
1296 * bus_pci == slot of last PCI bus encountered
1299 /* check the 1 PCI bus case for sanity */
1300 /* if it is number 0 all is well */
1301 if (num_pci_bus == 1 &&
1302 bus_data[bus_pci].bus_id != 0) {
1304 /* mis-numbered, swap with whichever bus uses slot 0 */
1306 /* swap the bus entry types */
1307 bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
1308 bus_data[bus_0].bus_type = PCI;
1310 #ifdef APIC_IO
1311 /* swap each relevant INTerrupt entry */
1312 id = bus_data[bus_pci].bus_id;
1313 for (x = 0; x < nintrs; ++x) {
1314 if (io_apic_ints[x].src_bus_id == id) {
1315 io_apic_ints[x].src_bus_id = 0;
1317 else if (io_apic_ints[x].src_bus_id == 0) {
1318 io_apic_ints[x].src_bus_id = id;
1321 #endif
1324 #ifdef APIC_IO
1325 /* Assign IO APIC IDs.
1327 * First try the existing ID. If a conflict is detected, try
1328 * the ID in the MP table. If a conflict is still detected, find
1329 * a free id.
1331 * We cannot use the ID_TO_IO table before all conflicts have been
1332 * resolved and the table has been corrected.
1334 for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */
1336 /* First try to use the value set by the BIOS */
1337 physid = io_apic_get_id(apic);
1338 if (io_apic_id_acceptable(apic, physid)) {
1339 if (IO_TO_ID(apic) != physid)
1340 swap_apic_id(apic, IO_TO_ID(apic), physid);
1341 continue;
1344 /* Then check if the value in the MP table is acceptable */
1345 if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
1346 continue;
1348 /* Last resort, find a free APIC ID and use it */
1349 freeid = first_free_apic_id();
1350 if (freeid >= NAPICID)
1351 panic("No free physical APIC IDs found");
1353 if (io_apic_id_acceptable(apic, freeid)) {
1354 swap_apic_id(apic, IO_TO_ID(apic), freeid);
1355 continue;
1357 panic("Free physical APIC ID not usable");
1359 fix_id_to_io_mapping();
1360 #endif
1362 #ifdef APIC_IO
1363 /* detect and fix broken Compaq MP table */
1364 if (apic_int_type(0, 0) == -1) {
1365 kprintf("APIC_IO: MP table broken: 8259->APIC entry missing!\n");
1366 io_apic_ints[nintrs].int_type = 3; /* ExtInt */
1367 io_apic_ints[nintrs].int_vector = 0xff; /* Unassigned */
1368 /* XXX fixme, set src bus id etc, but it doesn't seem to hurt */
1369 io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0);
1370 io_apic_ints[nintrs].dst_apic_int = 0; /* Pin 0 */
1371 nintrs++;
1372 } else if (apic_int_type(0, 0) == 0) {
1373 kprintf("APIC_IO: MP table broken: ExtINT entry corrupt!\n");
1374 for (x = 0; x < nintrs; ++x)
1375 if ((0 == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1376 (0 == io_apic_ints[x].dst_apic_int)) {
1377 io_apic_ints[x].int_type = 3;
1378 io_apic_ints[x].int_vector = 0xff;
1379 break;
1384 * Fix missing IRQ 15 when IRQ 14 is an ISA interrupt. IDE
1385 * controllers universally come in pairs. If IRQ 14 is specified
1386 * as an ISA interrupt, then IRQ 15 had better be too.
1388 * [ Shuttle XPC / AMD Athlon X2 ]
1389 * The MPTable is missing an entry for IRQ 15. Note that the
1390 * ACPI table has an entry for both 14 and 15.
1392 if (apic_int_type(0, 14) == 0 && apic_int_type(0, 15) == -1) {
1393 kprintf("APIC_IO: MP table broken: IRQ 15 not ISA when IRQ 14 is!\n");
1394 io14 = io_apic_find_int_entry(0, 14);
1395 io_apic_ints[nintrs] = *io14;
1396 io_apic_ints[nintrs].src_bus_irq = 15;
1397 io_apic_ints[nintrs].dst_apic_int = 15;
1398 nintrs++;
1400 #endif
1403 #ifdef APIC_IO
1405 /* Assign low level interrupt handlers */
1406 static void
1407 setup_apic_irq_mapping(void)
1409 int x;
1410 int int_vector;
1412 /* Clear array */
1413 for (x = 0; x < APIC_INTMAPSIZE; x++) {
1414 int_to_apicintpin[x].ioapic = -1;
1415 int_to_apicintpin[x].int_pin = 0;
1416 int_to_apicintpin[x].apic_address = NULL;
1417 int_to_apicintpin[x].redirindex = 0;
1420 /* First assign ISA/EISA interrupts */
1421 for (x = 0; x < nintrs; x++) {
1422 int_vector = io_apic_ints[x].src_bus_irq;
1423 if (int_vector < APIC_INTMAPSIZE &&
1424 io_apic_ints[x].int_vector == 0xff &&
1425 int_to_apicintpin[int_vector].ioapic == -1 &&
1426 (apic_int_is_bus_type(x, ISA) ||
1427 apic_int_is_bus_type(x, EISA)) &&
1428 io_apic_ints[x].int_type == 0) {
1429 assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1430 io_apic_ints[x].dst_apic_int,
1431 int_vector);
1435 /* Assign ExtInt entry if no ISA/EISA interrupt 0 entry */
1436 for (x = 0; x < nintrs; x++) {
1437 if (io_apic_ints[x].dst_apic_int == 0 &&
1438 io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
1439 io_apic_ints[x].int_vector == 0xff &&
1440 int_to_apicintpin[0].ioapic == -1 &&
1441 io_apic_ints[x].int_type == 3) {
1442 assign_apic_irq(0, 0, 0);
1443 break;
1447 /* Assign PCI interrupts */
1448 for (x = 0; x < nintrs; ++x) {
1449 if (io_apic_ints[x].int_type == 0 &&
1450 io_apic_ints[x].int_vector == 0xff &&
1451 apic_int_is_bus_type(x, PCI))
1452 allocate_apic_irq(x);
1456 #endif
1458 static int
1459 processor_entry(proc_entry_ptr entry, int cpu)
1461 /* check for usability */
1462 if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
1463 return 0;
1465 if(entry->apic_id >= NAPICID)
1466 panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
1467 /* check for BSP flag */
1468 if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
1469 boot_cpu_id = entry->apic_id;
1470 CPU_TO_ID(0) = entry->apic_id;
1471 ID_TO_CPU(entry->apic_id) = 0;
1472 return 0; /* it's already been counted */
1475 /* add another AP to list, if less than max number of CPUs */
1476 else if (cpu < MAXCPU) {
1477 CPU_TO_ID(cpu) = entry->apic_id;
1478 ID_TO_CPU(entry->apic_id) = cpu;
1479 return 1;
1482 return 0;
1486 static int
1487 bus_entry(bus_entry_ptr entry, int bus)
1489 int x;
1490 char c, name[8];
1492 /* encode the name into an index */
1493 for (x = 0; x < 6; ++x) {
1494 if ((c = entry->bus_type[x]) == ' ')
1495 break;
1496 name[x] = c;
1498 name[x] = '\0';
1500 if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
1501 panic("unknown bus type: '%s'", name);
1503 bus_data[bus].bus_id = entry->bus_id;
1504 bus_data[bus].bus_type = x;
1506 return 1;
1509 #ifdef APIC_IO
1511 static int
1512 io_apic_entry(io_apic_entry_ptr entry, int apic)
1514 if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
1515 return 0;
1517 IO_TO_ID(apic) = entry->apic_id;
1518 if (entry->apic_id < NAPICID)
1519 ID_TO_IO(entry->apic_id) = apic;
1521 return 1;
1524 #endif
1526 static int
1527 lookup_bus_type(char *name)
1529 int x;
1531 for (x = 0; x < MAX_BUSTYPE; ++x)
1532 if (strcmp(bus_type_table[x].name, name) == 0)
1533 return bus_type_table[x].type;
1535 return UNKNOWN_BUSTYPE;
1538 #ifdef APIC_IO
1540 static int
1541 int_entry(int_entry_ptr entry, int intr)
1543 int apic;
1545 io_apic_ints[intr].int_type = entry->int_type;
1546 io_apic_ints[intr].int_flags = entry->int_flags;
1547 io_apic_ints[intr].src_bus_id = entry->src_bus_id;
1548 io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
1549 if (entry->dst_apic_id == 255) {
1550 /* This signal goes to all IO APICs. Select an IO APIC
1551 with a sufficient number of interrupt pins */
1552 for (apic = 0; apic < mp_napics; apic++)
1553 if (((io_apic_read(apic, IOAPIC_VER) &
1554 IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
1555 entry->dst_apic_int)
1556 break;
1557 if (apic < mp_napics)
1558 io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
1559 else
1560 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1561 } else
1562 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1563 io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;
1565 return 1;
1568 static int
1569 apic_int_is_bus_type(int intr, int bus_type)
1571 int bus;
1573 for (bus = 0; bus < mp_nbusses; ++bus)
1574 if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
1575 && ((int) bus_data[bus].bus_type == bus_type))
1576 return 1;
1578 return 0;
1582 * Given a traditional ISA INT mask, return an APIC mask.
1584 u_int
1585 isa_apic_mask(u_int isa_mask)
1587 int isa_irq;
1588 int apic_pin;
1590 #if defined(SKIP_IRQ15_REDIRECT)
1591 if (isa_mask == (1 << 15)) {
1592 kprintf("skipping ISA IRQ15 redirect\n");
1593 return isa_mask;
1595 #endif /* SKIP_IRQ15_REDIRECT */
1597 isa_irq = ffs(isa_mask); /* find its bit position */
1598 if (isa_irq == 0) /* doesn't exist */
1599 return 0;
1600 --isa_irq; /* make it zero based */
1602 apic_pin = isa_apic_irq(isa_irq); /* look for APIC connection */
1603 if (apic_pin == -1)
1604 return 0;
1606 return (1 << apic_pin); /* convert pin# to a mask */
1610 * Determine which APIC pin an ISA/EISA INT is attached to.
1612 #define INTTYPE(I) (io_apic_ints[(I)].int_type)
1613 #define INTPIN(I) (io_apic_ints[(I)].dst_apic_int)
1614 #define INTIRQ(I) (io_apic_ints[(I)].int_vector)
1615 #define INTAPIC(I) (ID_TO_IO(io_apic_ints[(I)].dst_apic_id))
1617 #define SRCBUSIRQ(I) (io_apic_ints[(I)].src_bus_irq)
1619 isa_apic_irq(int isa_irq)
1621 int intr;
1623 for (intr = 0; intr < nintrs; ++intr) { /* check each record */
1624 if (INTTYPE(intr) == 0) { /* standard INT */
1625 if (SRCBUSIRQ(intr) == isa_irq) {
1626 if (apic_int_is_bus_type(intr, ISA) ||
1627 apic_int_is_bus_type(intr, EISA)) {
1628 if (INTIRQ(intr) == 0xff)
1629 return -1; /* unassigned */
1630 return INTIRQ(intr); /* found */
1635 return -1; /* NOT found */
1640 * Determine which APIC pin a PCI INT is attached to.
1642 #define SRCBUSID(I) (io_apic_ints[(I)].src_bus_id)
1643 #define SRCBUSDEVICE(I) ((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
1644 #define SRCBUSLINE(I) (io_apic_ints[(I)].src_bus_irq & 0x03)
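/*
 * For PCI source buses the MP spec packs the routing into src_bus_irq:
 * bits 1:0 are the interrupt line (0 = INTA# .. 3 = INTD#) and bits 6:2
 * are the device number, which is what SRCBUSLINE()/SRCBUSDEVICE() pull
 * apart.  Callers pass pciInt 1-based (INTA# == 1), hence the --pciInt
 * below.
 */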
1646 pci_apic_irq(int pciBus, int pciDevice, int pciInt)
1648 int intr;
1650 --pciInt; /* zero based */
1652 for (intr = 0; intr < nintrs; ++intr) { /* check each record */
1653 if ((INTTYPE(intr) == 0) /* standard INT */
1654 && (SRCBUSID(intr) == pciBus)
1655 && (SRCBUSDEVICE(intr) == pciDevice)
1656 && (SRCBUSLINE(intr) == pciInt)) { /* a candidate IRQ */
1657 if (apic_int_is_bus_type(intr, PCI)) {
1658 if (INTIRQ(intr) == 0xff) {
1659 kprintf("IOAPIC: pci_apic_irq() "
1660 "failed\n");
1661 return -1; /* unassigned */
1663 return INTIRQ(intr); /* exact match */
1668 return -1; /* NOT found */
1672 next_apic_irq(int irq)
1674 int intr, ointr;
1675 int bus, bustype;
1677 bus = 0;
1678 bustype = 0;
1679 for (intr = 0; intr < nintrs; intr++) {
1680 if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
1681 continue;
1682 bus = SRCBUSID(intr);
1683 bustype = apic_bus_type(bus);
1684 if (bustype != ISA &&
1685 bustype != EISA &&
1686 bustype != PCI)
1687 continue;
1688 break;
1690 if (intr >= nintrs) {
1691 return -1;
1693 for (ointr = intr + 1; ointr < nintrs; ointr++) {
1694 if (INTTYPE(ointr) != 0)
1695 continue;
1696 if (bus != SRCBUSID(ointr))
1697 continue;
1698 if (bustype == PCI) {
1699 if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
1700 continue;
1701 if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
1702 continue;
1704 if (bustype == ISA || bustype == EISA) {
1705 if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
1706 continue;
1708 if (INTPIN(intr) == INTPIN(ointr))
1709 continue;
1710 break;
1712 if (ointr >= nintrs) {
1713 return -1;
1715 return INTIRQ(ointr);
1717 #undef SRCBUSLINE
1718 #undef SRCBUSDEVICE
1719 #undef SRCBUSID
1720 #undef SRCBUSIRQ
1722 #undef INTPIN
1723 #undef INTIRQ
1724 #undef INTAPIC
1725 #undef INTTYPE
1727 #endif
1730 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
1732 * XXX FIXME:
1733 * Exactly what this means is unclear at this point. It is a solution
1734 * for motherboards that redirect the MBIRQ0 pin. Generically a motherboard
1735 * could route any of the ISA INTs to upper (>15) IRQ values. But most would
1736 * NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
1737 * option.
1740 undirect_isa_irq(int rirq)
1742 #if defined(READY)
1743 if (bootverbose)
1744 kprintf("Freeing redirected ISA irq %d.\n", rirq);
1745 /** FIXME: tickle the MB redirector chip */
1746 return /* XXX */;
1747 #else
1748 if (bootverbose)
1749 kprintf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
1750 return 0;
1751 #endif /* READY */
1756 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
1759 undirect_pci_irq(int rirq)
1761 #if defined(READY)
1762 if (bootverbose)
1763 kprintf("Freeing redirected PCI irq %d.\n", rirq);
1765 /** FIXME: tickle the MB redirector chip */
1766 return /* XXX */;
1767 #else
1768 if (bootverbose)
1769 kprintf("Freeing (NOT implemented) redirected PCI irq %d.\n",
1770 rirq);
1771 return 0;
1772 #endif /* READY */
1777 * given a bus ID, return:
1778 * the bus type if found
1779 * -1 if NOT found
1782 apic_bus_type(int id)
1784 int x;
1786 for (x = 0; x < mp_nbusses; ++x)
1787 if (bus_data[x].bus_id == id)
1788 return bus_data[x].bus_type;
1790 return -1;
1793 #ifdef APIC_IO
1796 * given a LOGICAL APIC# and pin#, return:
1797 * the associated src bus ID if found
1798 * -1 if NOT found
1801 apic_src_bus_id(int apic, int pin)
1803 int x;
1805 /* search each of the possible INTerrupt sources */
1806 for (x = 0; x < nintrs; ++x)
1807 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1808 (pin == io_apic_ints[x].dst_apic_int))
1809 return (io_apic_ints[x].src_bus_id);
1811 return -1; /* NOT found */
1815 * given a LOGICAL APIC# and pin#, return:
1816 * the associated src bus IRQ if found
1817 * -1 if NOT found
1820 apic_src_bus_irq(int apic, int pin)
1822 int x;
1824 for (x = 0; x < nintrs; x++)
1825 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1826 (pin == io_apic_ints[x].dst_apic_int))
1827 return (io_apic_ints[x].src_bus_irq);
1829 return -1; /* NOT found */
1834 * given a LOGICAL APIC# and pin#, return:
1835 * the associated INTerrupt type if found
1836 * -1 if NOT found
1839 apic_int_type(int apic, int pin)
1841 int x;
1843 /* search each of the possible INTerrupt sources */
1844 for (x = 0; x < nintrs; ++x) {
1845 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1846 (pin == io_apic_ints[x].dst_apic_int))
1847 return (io_apic_ints[x].int_type);
1849 return -1; /* NOT found */
1853 * Return the IRQ associated with an APIC pin
1855 int
1856 apic_irq(int apic, int pin)
1858 int x;
1859 int res;
1861 for (x = 0; x < nintrs; ++x) {
1862 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1863 (pin == io_apic_ints[x].dst_apic_int)) {
1864 res = io_apic_ints[x].int_vector;
1865 if (res == 0xff)
1866 return -1;
1867 if (apic != int_to_apicintpin[res].ioapic)
1868 panic("apic_irq: inconsistent table %d/%d", apic, int_to_apicintpin[res].ioapic);
1869 if (pin != int_to_apicintpin[res].int_pin)
1870 panic("apic_irq inconsistent table (2)");
1871 return res;
1874 return -1;
1879 * given a LOGICAL APIC# and pin#, return:
1880 * the associated trigger mode if found
1881 * -1 if NOT found
1884 apic_trigger(int apic, int pin)
1886 int x;
1888 /* search each of the possible INTerrupt sources */
1889 for (x = 0; x < nintrs; ++x)
1890 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1891 (pin == io_apic_ints[x].dst_apic_int))
1892 return ((io_apic_ints[x].int_flags >> 2) & 0x03);
1894 return -1; /* NOT found */
1899 * given a LOGICAL APIC# and pin#, return:
1900 * the associated 'active' level if found
1901 * -1 if NOT found
1904 apic_polarity(int apic, int pin)
1906 int x;
1908 /* search each of the possible INTerrupt sources */
1909 for (x = 0; x < nintrs; ++x)
1910 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1911 (pin == io_apic_ints[x].dst_apic_int))
1912 return (io_apic_ints[x].int_flags & 0x03);
1914 return -1; /* NOT found */
1917 #endif
1920 * set data according to MP defaults
1921 * FIXME: probably not complete yet...
1923 static void
1924 default_mp_table(int type)
1926 int ap_cpu_id;
1927 #if defined(APIC_IO)
1928 int io_apic_id;
1929 int pin;
1930 #endif /* APIC_IO */
1932 #if 0
1933 kprintf(" MP default config type: %d\n", type);
1934 switch (type) {
1935 case 1:
1936 kprintf(" bus: ISA, APIC: 82489DX\n");
1937 break;
1938 case 2:
1939 kprintf(" bus: EISA, APIC: 82489DX\n");
1940 break;
1941 case 3:
1942 kprintf(" bus: EISA, APIC: 82489DX\n");
1943 break;
1944 case 4:
1945 kprintf(" bus: MCA, APIC: 82489DX\n");
1946 break;
1947 case 5:
1948 kprintf(" bus: ISA+PCI, APIC: Integrated\n");
1949 break;
1950 case 6:
1951 kprintf(" bus: EISA+PCI, APIC: Integrated\n");
1952 break;
1953 case 7:
1954 kprintf(" bus: MCA+PCI, APIC: Integrated\n");
1955 break;
1956 default:
1957 kprintf(" future type\n");
1958 break;
1959 /* NOTREACHED */
1961 #endif /* 0 */
1963 boot_cpu_id = (lapic->id & APIC_ID_MASK) >> 24;
1964 ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;
1966 /* BSP */
1967 CPU_TO_ID(0) = boot_cpu_id;
1968 ID_TO_CPU(boot_cpu_id) = 0;
1970 /* one and only AP */
1971 CPU_TO_ID(1) = ap_cpu_id;
1972 ID_TO_CPU(ap_cpu_id) = 1;
1974 #if defined(APIC_IO)
1975 /* one and only IO APIC */
1976 io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
1979 * sanity check, refer to MP spec section 3.6.6, last paragraph
1980 * necessary as some hardware isn't properly setting up the IO APIC
1982 #if defined(REALLY_ANAL_IOAPICID_VALUE)
1983 if (io_apic_id != 2) {
1984 #else
1985 if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
1986 #endif /* REALLY_ANAL_IOAPICID_VALUE */
1987 io_apic_set_id(0, 2);
1988 io_apic_id = 2;
1990 IO_TO_ID(0) = io_apic_id;
1991 ID_TO_IO(io_apic_id) = 0;
1992 #endif /* APIC_IO */
1994 /* fill out bus entries */
1995 switch (type) {
1996 case 1:
1997 case 2:
1998 case 3:
1999 case 4:
2000 case 5:
2001 case 6:
2002 case 7:
2003 bus_data[0].bus_id = default_data[type - 1][1];
2004 bus_data[0].bus_type = default_data[type - 1][2];
2005 bus_data[1].bus_id = default_data[type - 1][3];
2006 bus_data[1].bus_type = default_data[type - 1][4];
2007 break;
2009 /* case 4: case 7: MCA NOT supported */
2010 default: /* illegal/reserved */
2011 panic("BAD default MP config: %d", type);
2012 /* NOTREACHED */
2015 #if defined(APIC_IO)
2016 /* general cases from MP v1.4, table 5-2 */
2017 for (pin = 0; pin < 16; ++pin) {
2018 io_apic_ints[pin].int_type = 0;
2019 io_apic_ints[pin].int_flags = 0x05; /* edge/active-hi */
2020 io_apic_ints[pin].src_bus_id = 0;
2021 io_apic_ints[pin].src_bus_irq = pin; /* IRQ2 caught below */
2022 io_apic_ints[pin].dst_apic_id = io_apic_id;
2023 io_apic_ints[pin].dst_apic_int = pin; /* 1-to-1 */
2026 /* special cases from MP v1.4, table 5-2 */
2027 if (type == 2) {
2028 io_apic_ints[2].int_type = 0xff; /* N/C */
2029 io_apic_ints[13].int_type = 0xff; /* N/C */
2030 #if !defined(APIC_MIXED_MODE)
2031 /** FIXME: ??? */
2032 panic("sorry, can't support type 2 default yet");
2033 #endif /* APIC_MIXED_MODE */
2035 else
2036 io_apic_ints[2].src_bus_irq = 0; /* ISA IRQ0 is on APIC INT 2 */
2038 if (type == 7)
2039 io_apic_ints[0].int_type = 0xff; /* N/C */
2040 else
2041 io_apic_ints[0].int_type = 3; /* vectored 8259 */
2042 #endif /* APIC_IO */
2046 * Map a physical memory address representing I/O into KVA. The I/O
2047 * block is assumed not to cross a page boundary.
2049 void *
2050 permanent_io_mapping(vm_paddr_t pa)
2052 KKASSERT(pa < 0x100000000LL);
2054 return pmap_mapdev_uncacheable(pa, PAGE_SIZE);
2058 * start each AP in our list
2060 static int
2061 start_all_aps(u_int boot_addr)
2063 vm_offset_t va = boot_address + KERNBASE;
2064 u_int64_t *pt4, *pt3, *pt2;
2065 int x, i, pg;
2066 int shift;
2067 u_char mpbiosreason;
2068 u_long mpbioswarmvec;
2069 struct mdglobaldata *gd;
2070 struct privatespace *ps;
2071 char *stack;
2072 uintptr_t kptbase;
2074 POSTCODE(START_ALL_APS_POST);
2076 /* Initialize BSP's local APIC */
2077 apic_initialize(TRUE);
2078 bsp_apic_ready = 1;
2080 /* install the AP 1st level boot code */
2081 pmap_kenter(va, boot_address);
2082 cpu_invlpg(va); /* JG XXX */
2083 bcopy(mptramp_start, (void *)va, bootMP_size);
2085 /* Locate the page tables, they'll be below the trampoline */
2086 pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
2087 pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
2088 pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);
2090 /* Create the initial 1GB replicated page tables */
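/*
 * Roughly: every PML4 slot points at the single PDP page and every PDP slot
 * at the single PD page, whose 512 2MB entries identity-map the low 1GB.
 * Any virtual address therefore resolves to its low-1GB alias, so both the
 * trampoline's physical addresses and the kernel's high mappings work while
 * the AP switches into long mode.
 */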
2091 for (i = 0; i < 512; i++) {
2092 /* Each slot of the level 4 pages points to the same level 3 page */
2093 pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
2094 pt4[i] |= PG_V | PG_RW | PG_U;
2096 /* Each slot of the level 3 pages points to the same level 2 page */
2097 pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
2098 pt3[i] |= PG_V | PG_RW | PG_U;
2100 /* The level 2 page slots are mapped with 2MB pages for 1GB. */
2101 pt2[i] = i * (2 * 1024 * 1024);
2102 pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
2105 /* save the current value of the warm-start vector */
2106 mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
2107 outb(CMOS_REG, BIOS_RESET);
2108 mpbiosreason = inb(CMOS_DATA);
2110 /* setup a vector to our boot code */
2111 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
2112 *((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
2113 outb(CMOS_REG, BIOS_RESET);
2114 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */
2116 /* start each AP */
2117 for (x = 1; x <= mp_naps; ++x) {
2119 /* This is a bit verbose, it will go away soon. */
2121 /* first page of AP's private space */
2122 pg = x * amd64_btop(sizeof(struct privatespace));
2124 /* allocate new private data page(s) */
2125 gd = (struct mdglobaldata *)kmem_alloc(&kernel_map,
2126 MDGLOBALDATA_BASEALLOC_SIZE);
2127 #if JGXXX
2128 /* wire it into the private page table page */
2129 for (i = 0; i < MDGLOBALDATA_BASEALLOC_SIZE; i += PAGE_SIZE) {
2130 SMPpt[pg + i / PAGE_SIZE] = (pt_entry_t)
2131 (PG_V | PG_RW | vtophys_pte((char *)gd + i));
2133 pg += MDGLOBALDATA_BASEALLOC_PAGES;
2135 SMPpt[pg + 0] = 0; /* *gd_CMAP1 */
2136 SMPpt[pg + 1] = 0; /* *gd_CMAP2 */
2137 SMPpt[pg + 2] = 0; /* *gd_CMAP3 */
2138 SMPpt[pg + 3] = 0; /* *gd_PMAP1 */
2140 /* allocate and set up an idle stack data page */
2141 stack = (char *)kmem_alloc(&kernel_map, UPAGES*PAGE_SIZE);
2142 for (i = 0; i < UPAGES; i++) {
2143 SMPpt[pg + 4 + i] = (pt_entry_t)
2144 (PG_V | PG_RW | vtophys_pte(PAGE_SIZE * i + stack));
2146 #endif
2148 gd = &CPU_prvspace[x].mdglobaldata; /* official location */
2149 bzero(gd, sizeof(*gd));
2150 gd->mi.gd_prvspace = ps = &CPU_prvspace[x];
2152 /* prime data page for it to use */
2153 mi_gdinit(&gd->mi, x);
2154 cpu_gdinit(gd, x);
2155 gd->gd_CMAP1 = &SMPpt[pg + 0];
2156 gd->gd_CMAP2 = &SMPpt[pg + 1];
2157 gd->gd_CMAP3 = &SMPpt[pg + 2];
2158 gd->gd_PMAP1 = &SMPpt[pg + 3];
2159 gd->gd_CADDR1 = ps->CPAGE1;
2160 gd->gd_CADDR2 = ps->CPAGE2;
2161 gd->gd_CADDR3 = ps->CPAGE3;
2162 gd->gd_PADDR1 = (unsigned *)ps->PPAGE1;
2163 gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * (mp_naps + 1));
2164 bzero(gd->mi.gd_ipiq, sizeof(lwkt_ipiq) * (mp_naps + 1));
2166 /* setup a vector to our boot code */
2167 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
2168 *((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
2169 outb(CMOS_REG, BIOS_RESET);
2170 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */
2173 * Setup the AP boot stack
2175 bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
2176 bootAP = x;
2178 /* attempt to start the Application Processor */
2179 CHECK_INIT(99); /* setup checkpoints */
2180 if (!start_ap(gd, boot_addr)) {
2181 kprintf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
2182 CHECK_PRINT("trace"); /* show checkpoints */
2183 /* better panic as the AP may be running loose */
2184 kprintf("panic y/n? [y] ");
2185 if (cngetc() != 'n')
2186 panic("bye-bye");
2188 CHECK_PRINT("trace"); /* show checkpoints */
2190 /* record its version info */
2191 cpu_apic_versions[x] = cpu_apic_versions[0];
2194 /* set ncpus to 1 + highest logical cpu. Not all may have come up */
2195 ncpus = x;
2197 /* ncpus2 -- ncpus rounded down to the nearest power of 2 */
2198 for (shift = 0; (1 << shift) <= ncpus; ++shift)
2200 --shift;
2201 ncpus2_shift = shift;
2202 ncpus2 = 1 << shift;
2203 ncpus2_mask = ncpus2 - 1;
2205 /* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
2206 if ((1 << shift) < ncpus)
2207 ++shift;
2208 ncpus_fit = 1 << shift;
2209 ncpus_fit_mask = ncpus_fit - 1;
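/*
 * Worked example of the two roundings above: with ncpus == 6 the loop
 * exits with shift == 3 (1 << 3 == 8 > 6), so after --shift we get
 * ncpus2 == 4 and ncpus2_mask == 3; the fixup then bumps shift back to
 * 3, giving ncpus_fit == 8 and ncpus_fit_mask == 7.
 */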
2211 /* build our map of 'other' CPUs */
2212 mycpu->gd_other_cpus = smp_startup_mask & ~(1 << mycpu->gd_cpuid);
2213 mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * ncpus);
2214 bzero(mycpu->gd_ipiq, sizeof(lwkt_ipiq) * ncpus);
2216 /* fill in our (BSP) APIC version */
2217 cpu_apic_versions[0] = lapic->version;
2219 /* restore the warmstart vector */
2220 *(u_long *) WARMBOOT_OFF = mpbioswarmvec;
2221 outb(CMOS_REG, BIOS_RESET);
2222 outb(CMOS_DATA, mpbiosreason);
2224 /*
2225 * NOTE!  The idlestack for the BSP was set up by locore.  Finish
2226 * up, clean out the P==V mapping we did earlier.
2227 */
2228 #if JGXXX
2229 for (x = 0; x < NKPT; x++)
2230 PTD[x] = 0;
2231 #endif
2232 pmap_set_opt();
2234 /* number of APs actually started */
2235 return ncpus - 1;
2236 }
2239 /*
2240 * load the 1st level AP boot code into base memory.
2241 */
2243 /* targets for relocation */
2244 extern void bigJump(void);
2245 extern void bootCodeSeg(void);
2246 extern void bootDataSeg(void);
2247 extern void MPentry(void);
2248 extern u_int MP_GDT;
2249 extern u_int mp_gdtbase;
2251 static void
2252 install_ap_tramp(u_int boot_addr)
2253 {
2254 int x;
2255 int size = *(int *) ((u_long) & bootMP_size);
2256 u_char *src = (u_char *) ((u_long) bootMP);
2257 u_char *dst = (u_char *) boot_addr + KERNBASE;
2258 u_int boot_base = (u_int) bootMP;
2259 u_int8_t *dst8;
2260 u_int16_t *dst16;
2261 u_int32_t *dst32;
2263 POSTCODE(INSTALL_AP_TRAMP_POST);
2265 for (x = 0; x < size; ++x)
2266 *dst++ = *src++;
2268 /*
2269 * Modify addresses in the code we just moved to basemem.  Unfortunately
2270 * we need fairly detailed info about mpboot.s for this to work; changes
2271 * to mpboot.s might require changes here.
2272 */
2274 /* boot code is located in KERNEL space */
2275 dst = (u_char *) boot_addr + KERNBASE;
2277 /* modify the lgdt arg */
2278 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
2279 *dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
2281 /* modify the ljmp target for MPentry() */
2282 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
2283 *dst32 = ((u_int) MPentry - KERNBASE);
2285 /* modify the target for boot code segment */
2286 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
2287 dst8 = (u_int8_t *) (dst16 + 1);
2288 *dst16 = (u_int) boot_addr & 0xffff;
2289 *dst8 = ((u_int) boot_addr >> 16) & 0xff;
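/*
 * The 16-bit/8-bit pair written above (and again for the data segment
 * below) appears to fill in the low 24 bits of a segment-descriptor
 * base in the trampoline's temporary GDT laid out in mpboot.s, so the
 * descriptors point at wherever the boot code actually landed.
 */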
2291 /* modify the target for boot data segment */
2292 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
2293 dst8 = (u_int8_t *) (dst16 + 1);
2294 *dst16 = (u_int) boot_addr & 0xffff;
2295 *dst8 = ((u_int) boot_addr >> 16) & 0xff;
2296 }
2299 /*
2300 * this function starts the AP (application processor) identified
2301 * by the APIC ID 'physicalCpu'. It does quite a "song and dance"
2302 * to accomplish this. This is necessary because of the nuances
2303 * of the different hardware we might encounter. It ain't pretty,
2304 * but it seems to work.
2306 * NOTE: eventually an AP gets to ap_init(), which is called just
2307 * before the AP goes into the LWKT scheduler's idle loop.
2308 */
2309 static int
2310 start_ap(struct mdglobaldata *gd, u_int boot_addr)
2311 {
2312 int physical_cpu;
2313 int vector;
2314 u_long icr_lo, icr_hi;
2316 POSTCODE(START_AP_POST);
2318 /* get the PHYSICAL APIC ID# */
2319 physical_cpu = CPU_TO_ID(gd->mi.gd_cpuid);
2321 /* calculate the vector */
2322 vector = (boot_addr >> 12) & 0xff;
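/*
 * The STARTUP IPI vector field is the 4KB page number of the real-mode
 * entry point: the AP begins executing at CS:IP = (vector << 8):0000,
 * so boot_addr must be page aligned and below 1MB.
 */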
2324 /* Make sure the target cpu sees everything */
2325 wbinvd();
2327 /*
2328 * First we do an INIT/RESET IPI.  This INIT IPI might be run, resetting
2329 * and running the target CPU, OR this INIT IPI might be latched (P5
2330 * bug) with the CPU waiting for a STARTUP IPI, OR this INIT IPI might
2331 * be ignored.
2332 */
2334 /* setup the address for the target AP */
2335 icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
2336 icr_hi |= (physical_cpu << 24);
2337 lapic->icr_hi = icr_hi;
2339 /* do an INIT IPI: assert RESET */
2340 icr_lo = lapic->icr_lo & 0xfff00000;
2341 lapic->icr_lo = icr_lo | 0x0000c500;
2343 /* wait for pending status end */
2344 while (lapic->icr_lo & APIC_DELSTAT_MASK)
2345 /* spin */ ;
2347 /* do an INIT IPI: deassert RESET */
2348 lapic->icr_lo = icr_lo | 0x00008500;
2350 /* wait for pending status end */
2351 u_sleep(10000); /* wait ~10mS */
2352 while (lapic->icr_lo & APIC_DELSTAT_MASK)
2353 /* spin */ ;
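/*
 * ICR decoding for the two writes above: 0x0000c500 = trigger mode
 * "level", level "assert", delivery mode INIT (vector ignored);
 * 0x00008500 is the matching INIT de-assert.  The 0xfff00000 mask keeps
 * only the reserved upper bits of icr_lo, so the delivery mode, level
 * and trigger bits are fully re-specified by each write.
 */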
2355 /*
2356 * Next we do a STARTUP IPI: the previous INIT IPI might still be
2357 * latched (P5 bug), in which case this 1st STARTUP IPI would terminate
2358 * immediately and the previously started INIT IPI would continue, OR
2359 * the previous INIT IPI has already run and this STARTUP IPI will
2360 * run, OR the previous INIT IPI was ignored and this STARTUP IPI
2361 * will run.
2362 */
2364 /* do a STARTUP IPI */
2365 lapic->icr_lo = icr_lo | 0x00000600 | vector;
2366 while (lapic->icr_lo & APIC_DELSTAT_MASK)
2367 /* spin */ ;
2368 u_sleep(200); /* wait ~200uS */
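/*
 * 0x00000600 selects the STARTUP delivery mode; OR-ing in 'vector'
 * supplies the page number computed above, which is how the AP ends up
 * executing the trampoline at boot_addr in real mode.
 */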
2370 /*
2371 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
2372 * the previous STARTUP IPI was cancelled by a latched INIT IPI, OR
2373 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
2374 * recognized after a hardware RESET or INIT IPI.
2375 */
2377 lapic->icr_lo = icr_lo | 0x00000600 | vector;
2378 while (lapic->icr_lo & APIC_DELSTAT_MASK)
2379 /* spin */ ;
2380 u_sleep(200); /* wait ~200uS */
2382 /* wait for it to start, see ap_init() */
2383 set_apic_timer(5000000);/* == 5 seconds */
2384 while (read_apic_timer()) {
2385 if (smp_startup_mask & (1 << gd->mi.gd_cpuid))
2386 return 1; /* return SUCCESS */
2387 }
2388 return 0; /* return FAILURE */
2389 }
2392 /*
2393 * Lazy flush the TLB on all other CPU's. DEPRECATED.
2395 * If for some reason we were unable to start all cpus we cannot safely
2396 * use broadcast IPIs.
2397 */
2398 void
2399 smp_invltlb(void)
2400 {
2401 #ifdef SMP
2402 if (smp_startup_mask == smp_active_mask) {
2403 all_but_self_ipi(XINVLTLB_OFFSET);
2404 } else {
2405 selected_apic_ipi(smp_active_mask, XINVLTLB_OFFSET,
2406 APIC_DELMODE_FIXED);
2407 }
2408 #endif
2409 }
2411 /*
2412 * When called, the executing CPU will send an IPI to all other CPUs
2413 * requesting that they halt execution.
2415 * Usually (but not necessarily) called with 'other_cpus' as its arg.
2417 * - Signals all CPUs in map to stop.
2418 * - Waits for each to stop.
2420 * Returns:
2421 * -1: error
2422 * 0: NA
2423 * 1: ok
2425 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
2426 * from executing at same time.
2427 */
2428 int
2429 stop_cpus(u_int map)
2430 {
2431 map &= smp_active_mask;
2433 /* send the Xcpustop IPI to all CPUs in map */
2434 selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
2436 while ((stopped_cpus & map) != map)
2437 /* spin */ ;
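/*
 * The Xcpustop handler on each target cpu is expected to set its bit
 * in stopped_cpus and then spin until restart_cpus() sets the matching
 * bit in started_cpus; the wait loop above relies on that behavior.
 */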
2439 return 1;
2440 }
2443 /*
2444 * Called by a CPU to restart stopped CPUs.
2446 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
2448 * - Signals all CPUs in map to restart.
2449 * - Waits for each to restart.
2451 * Returns:
2452 * -1: error
2453 * 0: NA
2454 * 1: ok
2455 */
2456 int
2457 restart_cpus(u_int map)
2458 {
2459 /* signal other cpus to restart */
2460 started_cpus = map & smp_active_mask;
2462 while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */
2463 /* spin */ ;
2465 return 1;
2466 }
2468 /*
2469 * This is called once the mpboot code has gotten us properly relocated
2470 * and the MMU turned on, etc. ap_init() is actually the idle thread,
2471 * and when it returns the scheduler will call the real cpu_idle() main
2472 * loop for the idlethread. Interrupts are disabled on entry and should
2473 * remain disabled at return.
2474 */
2475 void
2476 ap_init(void)
2477 {
2478 u_int apic_id;
2480 /*
2481 * Adjust smp_startup_mask to signal the BSP that we have started
2482 * up successfully. Note that we do not yet hold the BGL. The BSP
2483 * is waiting for our signal.
2485 * We can't set our bit in smp_active_mask yet because we are holding
2486 * interrupts physically disabled and remote cpus could deadlock
2487 * trying to send us an IPI.
2488 */
2489 smp_startup_mask |= 1 << mycpu->gd_cpuid;
2490 cpu_mfence();
2492 /*
2493 * Interlock for finalization. Wait until mp_finish is non-zero,
2494 * then get the MP lock.
2496 * Note: We are in a critical section.
2498 * Note: We have to synchronize td_mpcount to our desired MP state
2499 * before calling cpu_try_mplock().
2501 * Note: we are the idle thread, we can only spin.
2503 * Note: The load fence is memory volatile and prevents the compiler
2504 * from improperly caching mp_finish, and the cpu from improperly
2505 * caching it.
2506 */
2507 while (mp_finish == 0)
2508 cpu_lfence();
2509 ++curthread->td_mpcount;
2510 while (cpu_try_mplock() == 0)
2511 ;
2513 if (cpu_feature & CPUID_TSC) {
2514 /*
2515 * The BSP is constantly updating tsc0_offset, figure out the
2516 * relative difference to synchronize ktrdump.
2517 */
2518 tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
2519 }
2521 /* BSP may have changed PTD while we're waiting for the lock */
2522 cpu_invltlb();
2524 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2525 lidt(&r_idt);
2526 #endif
2528 /* Build our map of 'other' CPUs. */
2529 mycpu->gd_other_cpus = smp_startup_mask & ~(1 << mycpu->gd_cpuid);
2531 kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);
2533 /* A quick check from sanity claus */
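/*
 * lapic->id carries the local APIC ID in its top byte; only the low
 * four bits are used to index apic_id_to_logical here, which assumes
 * APIC IDs below 16.
 */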
2534 apic_id = (apic_id_to_logical[(lapic->id & 0x0f000000) >> 24]);
2535 if (mycpu->gd_cpuid != apic_id) {
2536 kprintf("SMP: cpuid = %d\n", mycpu->gd_cpuid);
2537 kprintf("SMP: apic_id = %d\n", apic_id);
2538 #if JGXXX
2539 kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
2540 #endif
2541 panic("cpuid mismatch! boom!!");
2542 }
2544 /* Initialize AP's local APIC for irq's */
2545 apic_initialize(FALSE);
2547 /* Set memory range attributes for this CPU to match the BSP */
2548 mem_range_AP_init();
2550 /*
2551 * Once we go active we must process any IPIQ messages that may
2552 * have been queued, because no actual IPI will occur until we
2553 * set our bit in the smp_active_mask.  If we don't, the IPI
2554 * message interlock could be left set, which would also prevent
2555 * further IPIs.
2556 *
2557 * The idle loop doesn't expect the BGL to be held and while
2558 * lwkt_switch() normally cleans things up, this is a special case
2559 * because we are returning almost directly into the idle loop.
2560 *
2561 * The idle thread is never placed on the runq, make sure
2562 * nothing we've done put it there.
2563 */
2564 KKASSERT(curthread->td_mpcount == 1);
2565 smp_active_mask |= 1 << mycpu->gd_cpuid;
2567 /*
2568 * Enable interrupts here. idle_restore will also do it, but
2569 * doing it here lets us clean up any strays that got posted to
2570 * the CPU during the AP boot while we are still in a critical
2571 * section.
2572 */
2573 __asm __volatile("sti; pause; pause"::);
2574 mdcpu->gd_fpending = 0;
2576 initclocks_pcpu(); /* clock interrupts (via IPIs) */
2577 lwkt_process_ipiq();
2579 /*
2580 * Releasing the mp lock lets the BSP finish up the SMP init
2581 */
2582 rel_mplock();
2583 KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
2584 }
2586 /*
2587 * Get SMP fully working before we start initializing devices.
2588 */
2589 static
2590 void
2591 ap_finish(void)
2592 {
2593 mp_finish = 1;
2594 if (bootverbose)
2595 kprintf("Finish MP startup\n");
2596 if (cpu_feature & CPUID_TSC)
2597 tsc0_offset = rdtsc();
2598 tsc_offsets[0] = 0;
2599 rel_mplock();
2600 while (smp_active_mask != smp_startup_mask) {
2601 cpu_lfence();
2602 if (cpu_feature & CPUID_TSC)
2603 tsc0_offset = rdtsc();
2604 }
2605 while (try_mplock() == 0)
2606 ;
2607 if (bootverbose)
2608 kprintf("Active CPU Mask: %08x\n", smp_active_mask);
2609 }
2611 SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL)
2613 void
2614 cpu_send_ipiq(int dcpu)
2615 {
2616 if ((1 << dcpu) & smp_active_mask)
2617 single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
2618 }
2620 #if 0 /* single_apic_ipi_passive() not working yet */
2621 /*
2622 * Returns 0 on failure, 1 on success
2623 */
2624 int
2625 cpu_send_ipiq_passive(int dcpu)
2626 {
2627 int r = 0;
2628 if ((1 << dcpu) & smp_active_mask) {
2629 r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
2630 APIC_DELMODE_FIXED);
2631 }
2632 return(r);
2633 }
2634 #endif