Staticize mp_nbusses
[dragonfly.git] / sys / platform / pc32 / i386 / mp_machdep.c
blob 1be79555eb5151fa3cc3053516689e85d13109fa
1 /*
2 * Copyright (c) 1996, by Steve Passe
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
25 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
26 * $DragonFly: src/sys/platform/pc32/i386/mp_machdep.c,v 1.60 2008/06/07 12:03:52 mneumann Exp $
29 #include "opt_cpu.h"
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/sysctl.h>
35 #include <sys/malloc.h>
36 #include <sys/memrange.h>
37 #include <sys/cons.h> /* cngetc() */
38 #include <sys/machintr.h>
40 #include <vm/vm.h>
41 #include <vm/vm_param.h>
42 #include <vm/pmap.h>
43 #include <vm/vm_kern.h>
44 #include <vm/vm_extern.h>
45 #include <sys/lock.h>
46 #include <vm/vm_map.h>
47 #include <sys/user.h>
48 #ifdef GPROF
49 #include <sys/gmon.h>
50 #endif
52 #include <machine/smp.h>
53 #include <machine_base/apic/apicreg.h>
54 #include <machine/atomic.h>
55 #include <machine/cpufunc.h>
56 #include <machine_base/apic/mpapic.h>
57 #include <machine/psl.h>
58 #include <machine/segments.h>
59 #include <machine/tss.h>
60 #include <machine/specialreg.h>
61 #include <machine/globaldata.h>
63 #include <machine/md_var.h> /* setidt() */
64 #include <machine_base/icu/icu.h> /* IPIs */
65 #include <machine_base/isa/intr_machdep.h> /* IPIs */
67 #define FIXUP_EXTRA_APIC_INTS 8 /* additional entries we may create */
69 #define WARMBOOT_TARGET 0
70 #define WARMBOOT_OFF (KERNBASE + 0x0467)
71 #define WARMBOOT_SEG (KERNBASE + 0x0469)
73 #define BIOS_BASE (0xf0000)
74 #define BIOS_SIZE (0x10000)
75 #define BIOS_COUNT (BIOS_SIZE/4)
77 #define CMOS_REG (0x70)
78 #define CMOS_DATA (0x71)
79 #define BIOS_RESET (0x0f)
80 #define BIOS_WARM (0x0a)
82 #define PROCENTRY_FLAG_EN 0x01
83 #define PROCENTRY_FLAG_BP 0x02
84 #define IOAPICENTRY_FLAG_EN 0x01
87 /* MP Floating Pointer Structure */
88 typedef struct MPFPS {
89 char signature[4];
90 u_int32_t pap;
91 u_char length;
92 u_char spec_rev;
93 u_char checksum;
94 u_char mpfb1;
95 u_char mpfb2;
96 u_char mpfb3;
97 u_char mpfb4;
98 u_char mpfb5;
99 } *mpfps_t;
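/*
 * (Layout note: pap holds the physical address of the MP configuration
 * table.  mpfb1..mpfb5 are the MP feature bytes; a non-zero mpfb1 selects
 * one of the spec's default configurations (handled by mptable_default()
 * below), and bit 7 of mpfb2 indicates an IMCR is present, which
 * mptable_pass2() records via MACHINTR_VAR_IMCR_PRESENT.)
 */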
101 /* MP Configuration Table Header */
102 typedef struct MPCTH {
103 char signature[4];
104 u_short base_table_length;
105 u_char spec_rev;
106 u_char checksum;
107 u_char oem_id[8];
108 u_char product_id[12];
109 void *oem_table_pointer;
110 u_short oem_table_size;
111 u_short entry_count;
112 void *apic_address;
113 u_short extended_table_length;
114 u_char extended_table_checksum;
115 u_char reserved;
116 } *mpcth_t;
119 typedef struct PROCENTRY {
120 u_char type;
121 u_char apic_id;
122 u_char apic_version;
123 u_char cpu_flags;
124 u_long cpu_signature;
125 u_long feature_flags;
126 u_long reserved1;
127 u_long reserved2;
128 } *proc_entry_ptr;
130 typedef struct BUSENTRY {
131 u_char type;
132 u_char bus_id;
133 char bus_type[6];
134 } *bus_entry_ptr;
136 typedef struct IOAPICENTRY {
137 u_char type;
138 u_char apic_id;
139 u_char apic_version;
140 u_char apic_flags;
141 void *apic_address;
142 } *io_apic_entry_ptr;
144 typedef struct INTENTRY {
145 u_char type;
146 u_char int_type;
147 u_short int_flags;
148 u_char src_bus_id;
149 u_char src_bus_irq;
150 u_char dst_apic_id;
151 u_char dst_apic_int;
152 } *int_entry_ptr;
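/*
 * int_flags packs the polarity into bits 0-1 and the trigger mode into
 * bits 2-3; see apic_polarity() and apic_trigger() below.
 */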
154 /* descriptions of MP basetable entries */
155 typedef struct BASETABLE_ENTRY {
156 u_char type;
157 u_char length;
158 char name[16];
159 } basetable_entry;
161 struct mptable_pos {
162 mpfps_t mp_fps;
163 mpcth_t mp_cth;
164 vm_size_t mp_cth_mapsz;
168 * this code MUST be enabled here and in mpboot.s.
169 * it follows the very early stages of AP boot by placing values in CMOS ram.
170 * it NORMALLY will never be needed and thus the primitive method for enabling.
173 #if defined(CHECK_POINTS)
174 #define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA))
175 #define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
177 #define CHECK_INIT(D); \
178 CHECK_WRITE(0x34, (D)); \
179 CHECK_WRITE(0x35, (D)); \
180 CHECK_WRITE(0x36, (D)); \
181 CHECK_WRITE(0x37, (D)); \
182 CHECK_WRITE(0x38, (D)); \
183 CHECK_WRITE(0x39, (D));
185 #define CHECK_PRINT(S); \
186 kprintf("%s: %d, %d, %d, %d, %d, %d\n", \
187 (S), \
188 CHECK_READ(0x34), \
189 CHECK_READ(0x35), \
190 CHECK_READ(0x36), \
191 CHECK_READ(0x37), \
192 CHECK_READ(0x38), \
193 CHECK_READ(0x39));
195 #else /* CHECK_POINTS */
197 #define CHECK_INIT(D)
198 #define CHECK_PRINT(S)
200 #endif /* CHECK_POINTS */
203 * Values to send to the POST hardware.
205 #define MP_BOOTADDRESS_POST 0x10
206 #define MP_PROBE_POST 0x11
207 #define MPTABLE_PASS1_POST 0x12
209 #define MP_START_POST 0x13
210 #define MP_ENABLE_POST 0x14
211 #define MPTABLE_PASS2_POST 0x15
213 #define START_ALL_APS_POST 0x16
214 #define INSTALL_AP_TRAMP_POST 0x17
215 #define START_AP_POST 0x18
217 #define MP_ANNOUNCE_POST 0x19
219 static int need_hyperthreading_fixup;
220 static u_int logical_cpus;
221 u_int logical_cpus_mask;
223 static int madt_probe_test;
224 TUNABLE_INT("hw.madt_probe_test", &madt_probe_test);
226 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
227 int current_postcode;
229 /** XXX FIXME: what system files declare these??? */
230 extern struct region_descriptor r_gdt, r_idt;
232 int mp_naps; /* # of Application Processors */
233 static int mp_nbusses; /* # of busses */
234 #ifdef APIC_IO
235 int mp_napics; /* # of IO APICs */
236 #endif
237 vm_offset_t cpu_apic_address;
238 #ifdef APIC_IO
239 vm_offset_t io_apic_address[NAPICID]; /* NAPICID is more than enough */
240 u_int32_t *io_apic_versions;
241 #endif
242 extern int nkpt;
244 u_int32_t cpu_apic_versions[MAXCPU];
245 int64_t tsc0_offset;
246 extern int64_t tsc_offsets[];
248 extern u_long ebda_addr;
250 #ifdef APIC_IO
251 struct apic_intmapinfo int_to_apicintpin[APIC_INTMAPSIZE];
252 #endif
255 * APIC ID logical/physical mapping structures.
256 * We oversize these to simplify boot-time config.
258 int cpu_num_to_apic_id[NAPICID];
259 #ifdef APIC_IO
260 int io_num_to_apic_id[NAPICID];
261 #endif
262 int apic_id_to_logical[NAPICID];
264 /* AP uses this during bootstrap. Do not staticize. */
265 char *bootSTK;
266 static int bootAP;
268 /* Hotwire a 0->4MB V==P mapping */
269 extern pt_entry_t *KPTphys;
272 * SMP page table page. Set up by locore to point to a page table
273 * page from which we allocate per-cpu privatespace areas, io_apics,
274 * and so forth.
277 #define IO_MAPPING_START_INDEX \
278 (SMP_MAXCPU * sizeof(struct privatespace) / PAGE_SIZE)
280 extern pt_entry_t *SMPpt;
281 static int SMPpt_alloc_index = IO_MAPPING_START_INDEX;
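/*
 * SMPpt[] layout: the first IO_MAPPING_START_INDEX entries back the per-cpu
 * privatespace areas, entries from SMPpt_alloc_index up to (but excluding)
 * NPTEPG - 2 are handed out by permanent_io_mapping(), and the final entry
 * (NPTEPG - 1) is reserved for the local APIC mapping made in mp_enable().
 */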
283 struct pcb stoppcbs[MAXCPU];
286 * Local data and functions.
289 static u_int boot_address;
290 static u_int base_memory;
291 static int mp_finish;
293 static void mp_enable(u_int boot_addr);
295 static int mptable_probe(void);
296 static int mptable_search_sig(u_int32_t target, int count);
297 static void mptable_hyperthread_fixup(u_int id_mask);
298 static void mptable_pass1(struct mptable_pos *);
299 static int mptable_pass2(struct mptable_pos *);
300 static void mptable_default(int type);
301 static void mptable_fix(void);
302 static void mptable_map(struct mptable_pos *, vm_paddr_t);
303 static void mptable_unmap(struct mptable_pos *);
305 #ifdef APIC_IO
306 static void setup_apic_irq_mapping(void);
307 static int apic_int_is_bus_type(int intr, int bus_type);
308 #endif
309 static int start_all_aps(u_int boot_addr);
310 static void install_ap_tramp(u_int boot_addr);
311 static int start_ap(struct mdglobaldata *gd, u_int boot_addr);
313 static cpumask_t smp_startup_mask = 1; /* which cpus have been started */
314 cpumask_t smp_active_mask = 1; /* which cpus are ready for IPIs etc? */
315 SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RD, &smp_active_mask, 0, "");
318 * Calculate usable address in base memory for AP trampoline code.
320 u_int
321 mp_bootaddress(u_int basemem)
323 POSTCODE(MP_BOOTADDRESS_POST);
325 base_memory = basemem;
327 boot_address = base_memory & ~0xfff; /* round down to 4k boundary */
328 if ((base_memory - boot_address) < bootMP_size)
329 boot_address -= 4096; /* not enough, lower by 4k */
331 return boot_address;
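/*
 * Illustrative example (assumed value): with basemem = 0x9fc00 the address
 * rounds down to 0x9f000, leaving 0xc00 bytes; if bootMP_size were larger
 * than that, the trampoline would be placed at 0x9e000 instead.
 */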
336 * Look for an Intel MP spec table (ie, SMP capable hardware).
338 static int
339 mptable_probe(void)
341 int x;
342 u_int32_t target;
345 * Make sure our SMPpt[] page table is big enough to hold all the
346 * mappings we need.
348 KKASSERT(IO_MAPPING_START_INDEX < NPTEPG - 2);
350 POSTCODE(MP_PROBE_POST);
352 /* see if EBDA exists */
353 if (ebda_addr != 0) {
354 /* search first 1K of EBDA */
355 target = (u_int32_t)ebda_addr;
356 if ((x = mptable_search_sig(target, 1024 / 4)) > 0)
357 return x;
358 } else {
359 /* last 1K of base memory, effective 'top of base' passed in */
360 target = (u_int32_t)(base_memory - 0x400);
361 if ((x = mptable_search_sig(target, 1024 / 4)) > 0)
362 return x;
365 /* search the BIOS */
366 target = (u_int32_t)BIOS_BASE;
367 if ((x = mptable_search_sig(target, BIOS_COUNT)) > 0)
368 return x;
370 /* nothing found */
371 return 0;
376 * Startup the SMP processors.
378 void
379 mp_start(void)
381 POSTCODE(MP_START_POST);
382 mp_enable(boot_address);
387 * Print various information about the SMP system hardware and setup.
389 void
390 mp_announce(void)
392 int x;
394 POSTCODE(MP_ANNOUNCE_POST);
396 kprintf("DragonFly/MP: Multiprocessor motherboard\n");
397 kprintf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
398 kprintf(", version: 0x%08x", cpu_apic_versions[0]);
399 kprintf(", at 0x%08x\n", cpu_apic_address);
400 for (x = 1; x <= mp_naps; ++x) {
401 kprintf(" cpu%d (AP): apic id: %2d", x, CPU_TO_ID(x));
402 kprintf(", version: 0x%08x", cpu_apic_versions[x]);
403 kprintf(", at 0x%08x\n", cpu_apic_address);
406 #if defined(APIC_IO)
407 for (x = 0; x < mp_napics; ++x) {
408 kprintf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
409 kprintf(", version: 0x%08x", io_apic_versions[x]);
410 kprintf(", at 0x%08x\n", io_apic_address[x]);
412 #else
413 kprintf(" Warning: APIC I/O disabled\n");
414 #endif /* APIC_IO */
418 * AP cpus call this to sync up protected mode.
420 * WARNING! We must ensure that the cpu is sufficiently initialized to
421 * be able to use the FP for our optimized bzero/bcopy code before
422 * we enter more mainstream C code.
424 * WARNING! %fs is not set up on entry. This routine sets up %fs.
426 void
427 init_secondary(void)
429 int gsel_tss;
430 int x, myid = bootAP;
431 u_int cr0;
432 struct mdglobaldata *md;
433 struct privatespace *ps;
435 ps = &CPU_prvspace[myid];
437 gdt_segs[GPRIV_SEL].ssd_base = (int)ps;
438 gdt_segs[GPROC0_SEL].ssd_base =
439 (int) &ps->mdglobaldata.gd_common_tss;
440 ps->mdglobaldata.mi.gd_prvspace = ps;
442 for (x = 0; x < NGDT; x++) {
443 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
446 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
447 r_gdt.rd_base = (int) &gdt[myid * NGDT];
448 lgdt(&r_gdt); /* does magic intra-segment return */
450 lidt(&r_idt);
452 lldt(_default_ldt);
453 mdcpu->gd_currentldt = _default_ldt;
455 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
456 gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
458 md = mdcpu; /* loaded through %fs:0 (mdglobaldata.mi.gd_prvspace)*/
460 md->gd_common_tss.tss_esp0 = 0; /* not used until after switch */
461 md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
462 md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;
463 md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
464 md->gd_common_tssd = *md->gd_tss_gdt;
465 ltr(gsel_tss);
468 * Set to a known state:
469 * Set by mpboot.s: CR0_PG, CR0_PE
470 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
472 cr0 = rcr0();
473 cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
474 load_cr0(cr0);
475 pmap_set_opt(); /* PSE/4MB pages, etc */
477 /* set up CPU registers and state */
478 cpu_setregs();
480 /* set up FPU state on the AP */
481 npxinit(__INITIAL_NPXCW__);
483 /* set up SSE registers */
484 enable_sse();
487 /*******************************************************************
488 * local functions and data
492 * start the SMP system
494 static void
495 mp_enable(u_int boot_addr)
497 int x;
498 #if defined(APIC_IO)
499 int apic;
500 u_int ux;
501 #endif /* APIC_IO */
502 vm_paddr_t mpfps_paddr;
504 POSTCODE(MP_ENABLE_POST);
506 if (madt_probe_test)
507 mpfps_paddr = 0;
508 else
509 mpfps_paddr = mptable_probe();
511 if (mpfps_paddr) {
512 struct mptable_pos mpt;
514 mptable_map(&mpt, mpfps_paddr);
517 * We can safely map physical memory into SMPpt after
518 * mptable_pass1() completes.
520 mptable_pass1(&mpt);
522 if (cpu_apic_address == 0)
523 panic("mp_enable: no local apic (mptable)!\n");
526 * Examine the MP table for needed info
528 x = mptable_pass2(&mpt);
530 mptable_unmap(&mpt);
532 /* Local apic is mapped on last page */
533 SMPpt[NPTEPG - 1] = (pt_entry_t)(PG_V | PG_RW | PG_N |
534 pmap_get_pgeflag() | (cpu_apic_address & PG_FRAME));
537 * Can't process default configs till the
538 * CPU APIC is pmapped
540 if (x)
541 mptable_default(x);
543 /* Post scan cleanup */
544 mptable_fix();
545 } else {
546 vm_paddr_t madt_paddr;
547 int bsp_apic_id;
549 madt_paddr = madt_probe();
550 if (madt_paddr == 0)
551 panic("mp_enable: madt_probe failed\n");
553 cpu_apic_address = madt_pass1(madt_paddr);
554 if (cpu_apic_address == 0)
555 panic("mp_enable: no local apic (madt)!\n");
557 /* Local apic is mapped on last page */
558 SMPpt[NPTEPG - 1] = (pt_entry_t)(PG_V | PG_RW | PG_N |
559 pmap_get_pgeflag() | (cpu_apic_address & PG_FRAME));
561 bsp_apic_id = (lapic.id & 0xff000000) >> 24;
562 if (madt_pass2(madt_paddr, bsp_apic_id))
563 panic("mp_enable: madt_pass2 failed\n");
566 #if defined(APIC_IO)
568 setup_apic_irq_mapping();
570 /* fill the LOGICAL io_apic_versions table */
571 for (apic = 0; apic < mp_napics; ++apic) {
572 ux = io_apic_read(apic, IOAPIC_VER);
573 io_apic_versions[apic] = ux;
574 io_apic_set_id(apic, IO_TO_ID(apic));
577 /* program each IO APIC in the system */
578 for (apic = 0; apic < mp_napics; ++apic)
579 if (io_apic_setup(apic) < 0)
580 panic("IO APIC setup failure");
582 #endif /* APIC_IO */
585 * These are required for SMP operation
588 /* install a 'Spurious INTerrupt' vector */
589 setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
590 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
592 /* install an inter-CPU IPI for TLB invalidation */
593 setidt(XINVLTLB_OFFSET, Xinvltlb,
594 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
596 /* install an inter-CPU IPI for IPIQ messaging */
597 setidt(XIPIQ_OFFSET, Xipiq,
598 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
600 /* install a timer vector */
601 setidt(XTIMER_OFFSET, Xtimer,
602 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
604 /* install an inter-CPU IPI for CPU stop/restart */
605 setidt(XCPUSTOP_OFFSET, Xcpustop,
606 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
608 /* start each Application Processor */
609 start_all_aps(boot_addr);
614 * look for the MP spec signature
617 /* string defined by the Intel MP Spec as identifying the MP table */
618 #define MP_SIG 0x5f504d5f /* _MP_ */
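/* 0x5f504d5f is the ASCII bytes '_', 'M', 'P', '_' read as a little-endian 32-bit word */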
619 #define NEXT(X) ((X) += 4)
620 static int
621 mptable_search_sig(u_int32_t target, int count)
623 vm_size_t map_size;
624 u_int32_t *addr;
625 int x, ret;
627 KKASSERT(target != 0);
629 map_size = count * sizeof(u_int32_t);
630 addr = pmap_mapdev((vm_paddr_t)target, map_size);
632 ret = 0;
633 for (x = 0; x < count; NEXT(x)) {
634 if (addr[x] == MP_SIG) {
635 /* make array index a byte index */
636 ret = target + (x * sizeof(u_int32_t));
637 break;
641 pmap_unmapdev((vm_offset_t)addr, map_size);
642 return ret;
646 static basetable_entry basetable_entry_types[] =
648 {0, 20, "Processor"},
649 {1, 8, "Bus"},
650 {2, 8, "I/O APIC"},
651 {3, 8, "I/O INT"},
652 {4, 8, "Local INT"}
655 typedef struct BUSDATA {
656 u_char bus_id;
657 enum busTypes bus_type;
658 } bus_datum;
660 typedef struct INTDATA {
661 u_char int_type;
662 u_short int_flags;
663 u_char src_bus_id;
664 u_char src_bus_irq;
665 u_char dst_apic_id;
666 u_char dst_apic_int;
667 u_char int_vector;
668 } io_int, local_int;
670 typedef struct BUSTYPENAME {
671 u_char type;
672 char name[7];
673 } bus_type_name;
675 static bus_type_name bus_type_table[] =
677 {CBUS, "CBUS"},
678 {CBUSII, "CBUSII"},
679 {EISA, "EISA"},
680 {MCA, "MCA"},
681 {UNKNOWN_BUSTYPE, "---"},
682 {ISA, "ISA"},
683 {MCA, "MCA"},
684 {UNKNOWN_BUSTYPE, "---"},
685 {UNKNOWN_BUSTYPE, "---"},
686 {UNKNOWN_BUSTYPE, "---"},
687 {UNKNOWN_BUSTYPE, "---"},
688 {UNKNOWN_BUSTYPE, "---"},
689 {PCI, "PCI"},
690 {UNKNOWN_BUSTYPE, "---"},
691 {UNKNOWN_BUSTYPE, "---"},
692 {UNKNOWN_BUSTYPE, "---"},
693 {UNKNOWN_BUSTYPE, "---"},
694 {XPRESS, "XPRESS"},
695 {UNKNOWN_BUSTYPE, "---"}
697 /* from MP spec v1.4, table 5-1 */
698 static int default_data[7][5] =
700 /* nbus, id0, type0, id1, type1 */
701 {1, 0, ISA, 255, 255},
702 {1, 0, EISA, 255, 255},
703 {1, 0, EISA, 255, 255},
704 {1, 0, MCA, 255, 255},
705 {2, 0, ISA, 1, PCI},
706 {2, 0, EISA, 1, PCI},
707 {2, 0, MCA, 1, PCI}
711 /* the bus data */
712 static bus_datum *bus_data;
714 #ifdef APIC_IO
715 /* the IO INT data, one entry per possible APIC INTerrupt */
716 static io_int *io_apic_ints;
717 static int nintrs;
718 #endif
720 static int processor_entry (proc_entry_ptr entry, int cpu);
721 static int bus_entry (bus_entry_ptr entry, int bus);
722 #ifdef APIC_IO
723 static int io_apic_entry (io_apic_entry_ptr entry, int apic);
724 static int int_entry (int_entry_ptr entry, int intr);
725 #endif
726 static int lookup_bus_type (char *name);
730 * 1st pass on motherboard's Intel MP specification table.
732 * determines:
733 * cpu_apic_address (common to all CPUs)
734 * io_apic_address[N]
735 * mp_naps
736 * mp_nbusses
737 * mp_napics
738 * nintrs
739 * need_hyperthreading_fixup
740 * logical_cpus
742 static void
743 mptable_pass1(struct mptable_pos *mpt)
745 #ifdef APIC_IO
746 int x;
747 #endif
748 mpfps_t fps;
749 mpcth_t cth;
750 int totalSize;
751 void* position;
752 int count;
753 int type;
754 u_int id_mask;
756 POSTCODE(MPTABLE_PASS1_POST);
758 fps = mpt->mp_fps;
759 KKASSERT(fps != NULL);
761 #ifdef APIC_IO
762 /* clear various tables */
763 for (x = 0; x < NAPICID; ++x) {
764 io_apic_address[x] = ~0; /* IO APIC address table */
766 #endif
768 /* init everything to empty */
769 mp_naps = 0;
770 mp_nbusses = 0;
771 #ifdef APIC_IO
772 mp_napics = 0;
773 nintrs = 0;
774 #endif
775 id_mask = 0;
777 /* check for use of 'default' configuration */
778 if (fps->mpfb1 != 0) {
779 /* use default addresses */
780 cpu_apic_address = DEFAULT_APIC_BASE;
781 #ifdef APIC_IO
782 io_apic_address[0] = DEFAULT_IO_APIC_BASE;
783 #endif
785 /* fill in with defaults */
786 mp_naps = 2; /* includes BSP */
787 mp_nbusses = default_data[fps->mpfb1 - 1][0];
788 #if defined(APIC_IO)
789 mp_napics = 1;
790 nintrs = 16;
791 #endif /* APIC_IO */
793 else {
794 cth = mpt->mp_cth;
795 if (cth == NULL)
796 panic("MP Configuration Table Header MISSING!");
798 cpu_apic_address = (vm_offset_t) cth->apic_address;
800 /* walk the table, recording info of interest */
801 totalSize = cth->base_table_length - sizeof(struct MPCTH);
802 position = (u_char *) cth + sizeof(struct MPCTH);
803 count = cth->entry_count;
805 while (count--) {
806 switch (type = *(u_char *) position) {
807 case 0: /* processor_entry */
808 if (((proc_entry_ptr)position)->cpu_flags
809 & PROCENTRY_FLAG_EN) {
810 ++mp_naps;
811 id_mask |= 1 <<
812 ((proc_entry_ptr)position)->apic_id;
814 break;
815 case 1: /* bus_entry */
816 ++mp_nbusses;
817 break;
818 case 2: /* io_apic_entry */
819 #ifdef APIC_IO
820 if (((io_apic_entry_ptr)position)->apic_flags
821 & IOAPICENTRY_FLAG_EN)
822 io_apic_address[mp_napics++] =
823 (vm_offset_t)((io_apic_entry_ptr)
824 position)->apic_address;
825 #endif
826 break;
827 case 3: /* int_entry */
828 #ifdef APIC_IO
829 ++nintrs;
830 #endif
831 break;
832 case 4: /* local int entry */
833 break;
834 default:
835 panic("mpfps Base Table HOSED!");
836 /* NOTREACHED */
839 totalSize -= basetable_entry_types[type].length;
840 position = (uint8_t *)position +
841 basetable_entry_types[type].length;
845 /* qualify the numbers */
846 if (mp_naps > MAXCPU) {
847 kprintf("Warning: only using %d of %d available CPUs!\n",
848 MAXCPU, mp_naps);
849 mp_naps = MAXCPU;
852 /* See if we need to fix up HT logical CPUs. */
853 mptable_hyperthread_fixup(id_mask);
855 --mp_naps; /* subtract the BSP */
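/*
 * From this point on mp_naps counts only the APs; the BSP is cpu0, which is
 * why loops elsewhere (mp_announce(), start_all_aps()) run from 1 through
 * mp_naps inclusive.
 */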
860 * 2nd pass on motherboard's Intel MP specification table.
862 * sets:
863 * logical_cpus_mask
864 * ID_TO_IO(N), phy APIC ID to log CPU/IO table
865 * CPU_TO_ID(N), logical CPU to APIC ID table
866 * IO_TO_ID(N), logical IO to APIC ID table
867 * bus_data[N]
868 * io_apic_ints[N]
870 static int
871 mptable_pass2(struct mptable_pos *mpt)
873 struct PROCENTRY proc;
874 int x;
875 mpfps_t fps;
876 mpcth_t cth;
877 int totalSize;
878 void* position;
879 int count;
880 int type;
881 int apic, bus, cpu, intr;
882 int i;
884 POSTCODE(MPTABLE_PASS2_POST);
886 fps = mpt->mp_fps;
887 KKASSERT(fps != NULL);
889 /* Initialize fake proc entry for use with HT fixup. */
890 bzero(&proc, sizeof(proc));
891 proc.type = 0;
892 proc.cpu_flags = PROCENTRY_FLAG_EN;
894 #ifdef APIC_IO
895 MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics,
896 M_DEVBUF, M_WAITOK);
897 MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics,
898 M_DEVBUF, M_WAITOK | M_ZERO);
899 MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + FIXUP_EXTRA_APIC_INTS),
900 M_DEVBUF, M_WAITOK);
901 #endif
902 MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses,
903 M_DEVBUF, M_WAITOK);
905 #ifdef APIC_IO
906 for (i = 0; i < mp_napics; i++) {
907 ioapic[i] = permanent_io_mapping(io_apic_address[i]);
909 #endif
911 /* clear various tables */
912 for (x = 0; x < NAPICID; ++x) {
913 CPU_TO_ID(x) = -1; /* logical CPU to APIC ID table */
914 #ifdef APIC_IO
915 ID_TO_IO(x) = -1; /* phy APIC ID to log CPU/IO table */
916 IO_TO_ID(x) = -1; /* logical IO to APIC ID table */
917 #endif
920 /* clear bus data table */
921 for (x = 0; x < mp_nbusses; ++x)
922 bus_data[x].bus_id = 0xff;
924 #ifdef APIC_IO
925 /* clear IO APIC INT table */
926 for (x = 0; x < (nintrs + 1); ++x) {
927 io_apic_ints[x].int_type = 0xff;
928 io_apic_ints[x].int_vector = 0xff;
930 #endif
932 /* record whether PIC or virtual-wire mode */
933 machintr_setvar_simple(MACHINTR_VAR_IMCR_PRESENT, fps->mpfb2 & 0x80);
935 /* check for use of 'default' configuration */
936 if (fps->mpfb1 != 0)
937 return fps->mpfb1; /* return default configuration type */
939 cth = mpt->mp_cth;
940 if (cth == NULL)
941 panic("MP Configuration Table Header MISSING!");
943 /* walk the table, recording info of interest */
944 totalSize = cth->base_table_length - sizeof(struct MPCTH);
945 position = (u_char *) cth + sizeof(struct MPCTH);
946 count = cth->entry_count;
947 apic = bus = intr = 0;
948 cpu = 1; /* pre-count the BSP */
950 while (count--) {
951 switch (type = *(u_char *) position) {
952 case 0:
953 if (processor_entry(position, cpu))
954 ++cpu;
956 if (need_hyperthreading_fixup) {
958 * Create fake mptable processor entries
959 * and feed them to processor_entry() to
960 * enumerate the logical CPUs.
962 proc.apic_id = ((proc_entry_ptr)position)->apic_id;
963 for (i = 1; i < logical_cpus; i++) {
964 proc.apic_id++;
965 processor_entry(&proc, cpu);
966 logical_cpus_mask |= (1 << cpu);
967 cpu++;
970 break;
971 case 1:
972 if (bus_entry(position, bus))
973 ++bus;
974 break;
975 case 2:
976 #ifdef APIC_IO
977 if (io_apic_entry(position, apic))
978 ++apic;
979 #endif
980 break;
981 case 3:
982 #ifdef APIC_IO
983 if (int_entry(position, intr))
984 ++intr;
985 #endif
986 break;
987 case 4:
988 /* int_entry(position); */
989 break;
990 default:
991 panic("mpfps Base Table HOSED!");
992 /* NOTREACHED */
995 totalSize -= basetable_entry_types[type].length;
996 position = (uint8_t *)position + basetable_entry_types[type].length;
999 if (CPU_TO_ID(0) < 0)
1000 panic("NO BSP found!");
1002 /* report fact that it's NOT a default configuration */
1003 return 0;
1007 * Check if we should perform a hyperthreading "fix-up" to
1008 * enumerate any logical CPUs that aren't already listed
1009 * in the table.
1011 * XXX: We assume that all of the physical CPUs in the
1012 * system have the same number of logical CPUs.
1014 * XXX: We assume that APIC IDs are allocated such that
1015 * the APIC IDs for a physical processor are aligned
1016 * with the number of logical CPUs in the processor.
1018 static void
1019 mptable_hyperthread_fixup(u_int id_mask)
1021 int i, id, lcpus_max;
1023 if ((cpu_feature & CPUID_HTT) == 0)
1024 return;
1026 lcpus_max = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
1027 if (lcpus_max <= 1)
1028 return;
1030 if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
1032 * INSTRUCTION SET REFERENCE, A-M (#253666)
1033 * Page 3-181, Table 3-20
1034 * "The nearest power-of-2 integer that is not smaller
1035 * than EBX[23:16] is the number of unique initial APIC
1036 * IDs reserved for addressing different logical
1037 * processors in a physical package."
1039 for (i = 0; ; ++i) {
1040 if ((1 << i) >= lcpus_max) {
1041 lcpus_max = 1 << i;
1042 break;
1047 if (mp_naps == lcpus_max) {
1048 /* We have nothing to fix */
1049 return;
1050 } else if (mp_naps == 1) {
1051 /* XXX this may be incorrect */
1052 logical_cpus = lcpus_max;
1053 } else {
1054 int cur, prev, dist;
1057 * Calculate the distances between two nearest
1058 * APIC IDs. If all such distances are same,
1059 * then it is the number of missing cpus that
1060 * we are going to fill later.
1062 dist = cur = prev = -1;
1063 for (id = 0; id < MAXCPU; ++id) {
1064 if ((id_mask & 1 << id) == 0)
1065 continue;
1067 cur = id;
1068 if (prev >= 0) {
1069 int new_dist = cur - prev;
1071 if (dist < 0)
1072 dist = new_dist;
1075 * Make sure that all distances
1076 * between two nearest APIC IDs
1077 * are same.
1079 if (dist != new_dist)
1080 return;
1082 prev = cur;
1084 if (dist == 1)
1085 return;
1087 /* Must be power of 2 */
1088 if (dist & (dist - 1))
1089 return;
1091 /* Can't exceed CPU package capacity */
1092 if (dist > lcpus_max)
1093 logical_cpus = lcpus_max;
1094 else
1095 logical_cpus = dist;
1099 * For each APIC ID of a CPU that is set in the mask,
1100 * scan the other candidate APIC IDs for this
1101 * physical processor. If any of those IDs are
1102 * already in the table, then kill the fixup.
1104 for (id = 0; id < MAXCPU; id++) {
1105 if ((id_mask & 1 << id) == 0)
1106 continue;
1107 /* First, make sure we are on a logical_cpus boundary. */
1108 if (id % logical_cpus != 0)
1109 return;
1110 for (i = id + 1; i < id + logical_cpus; i++)
1111 if ((id_mask & 1 << i) != 0)
1112 return;
1116 * Ok, the ID's checked out, so enable the fixup. We have to fixup
1117 * mp_naps right now.
1119 need_hyperthreading_fixup = 1;
1120 mp_naps *= logical_cpus;
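/*
 * Illustrative example (assumed topology): a single HTT package whose MP
 * table lists only the BSP at APIC ID 0 while CPUID reports two logical
 * CPUs.  The mp_naps == 1 branch above sets logical_cpus = 2, the fixup is
 * enabled, and mptable_pass2() then synthesizes a processor entry for
 * APIC ID 1.
 */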
1123 static void
1124 mptable_map(struct mptable_pos *mpt, vm_paddr_t mpfps_paddr)
1126 mpfps_t fps = NULL;
1127 mpcth_t cth = NULL;
1128 vm_size_t cth_mapsz = 0;
1130 fps = pmap_mapdev(mpfps_paddr, sizeof(*fps));
1131 if (fps->pap != 0) {
1133 * Map configuration table header to get
1134 * the base table size
1136 cth = pmap_mapdev(fps->pap, sizeof(*cth));
1137 cth_mapsz = cth->base_table_length;
1138 pmap_unmapdev((vm_offset_t)cth, sizeof(*cth));
1141 * Map the base table
1143 cth = pmap_mapdev(fps->pap, cth_mapsz);
1146 mpt->mp_fps = fps;
1147 mpt->mp_cth = cth;
1148 mpt->mp_cth_mapsz = cth_mapsz;
1151 static void
1152 mptable_unmap(struct mptable_pos *mpt)
1154 if (mpt->mp_cth != NULL) {
1155 pmap_unmapdev((vm_offset_t)mpt->mp_cth, mpt->mp_cth_mapsz);
1156 mpt->mp_cth = NULL;
1157 mpt->mp_cth_mapsz = 0;
1159 if (mpt->mp_fps != NULL) {
1160 pmap_unmapdev((vm_offset_t)mpt->mp_fps, sizeof(*mpt->mp_fps));
1161 mpt->mp_fps = NULL;
1165 #ifdef APIC_IO
1167 void
1168 assign_apic_irq(int apic, int intpin, int irq)
1170 int x;
1172 if (int_to_apicintpin[irq].ioapic != -1)
1173 panic("assign_apic_irq: inconsistent table");
1175 int_to_apicintpin[irq].ioapic = apic;
1176 int_to_apicintpin[irq].int_pin = intpin;
1177 int_to_apicintpin[irq].apic_address = ioapic[apic];
1178 int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;
1180 for (x = 0; x < nintrs; x++) {
1181 if ((io_apic_ints[x].int_type == 0 ||
1182 io_apic_ints[x].int_type == 3) &&
1183 io_apic_ints[x].int_vector == 0xff &&
1184 io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
1185 io_apic_ints[x].dst_apic_int == intpin)
1186 io_apic_ints[x].int_vector = irq;
1190 void
1191 revoke_apic_irq(int irq)
1193 int x;
1194 int oldapic;
1195 int oldintpin;
1197 if (int_to_apicintpin[irq].ioapic == -1)
1198 panic("revoke_apic_irq: inconsistent table");
1200 oldapic = int_to_apicintpin[irq].ioapic;
1201 oldintpin = int_to_apicintpin[irq].int_pin;
1203 int_to_apicintpin[irq].ioapic = -1;
1204 int_to_apicintpin[irq].int_pin = 0;
1205 int_to_apicintpin[irq].apic_address = NULL;
1206 int_to_apicintpin[irq].redirindex = 0;
1208 for (x = 0; x < nintrs; x++) {
1209 if ((io_apic_ints[x].int_type == 0 ||
1210 io_apic_ints[x].int_type == 3) &&
1211 io_apic_ints[x].int_vector != 0xff &&
1212 io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
1213 io_apic_ints[x].dst_apic_int == oldintpin)
1214 io_apic_ints[x].int_vector = 0xff;
1219 * Allocate an IRQ
1221 static void
1222 allocate_apic_irq(int intr)
1224 int apic;
1225 int intpin;
1226 int irq;
1228 if (io_apic_ints[intr].int_vector != 0xff)
1229 return; /* Interrupt handler already assigned */
1231 if (io_apic_ints[intr].int_type != 0 &&
1232 (io_apic_ints[intr].int_type != 3 ||
1233 (io_apic_ints[intr].dst_apic_id == IO_TO_ID(0) &&
1234 io_apic_ints[intr].dst_apic_int == 0)))
1235 return; /* Not INT or ExtInt on != (0, 0) */
1237 irq = 0;
1238 while (irq < APIC_INTMAPSIZE &&
1239 int_to_apicintpin[irq].ioapic != -1)
1240 irq++;
1242 if (irq >= APIC_INTMAPSIZE)
1243 return; /* No free interrupt handlers */
1245 apic = ID_TO_IO(io_apic_ints[intr].dst_apic_id);
1246 intpin = io_apic_ints[intr].dst_apic_int;
1248 assign_apic_irq(apic, intpin, irq);
1249 io_apic_setup_intpin(apic, intpin);
1253 static void
1254 swap_apic_id(int apic, int oldid, int newid)
1256 int x;
1257 int oapic;
1260 if (oldid == newid)
1261 return; /* Nothing to do */
1263 kprintf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n",
1264 apic, oldid, newid);
1266 /* Swap physical APIC IDs in interrupt entries */
1267 for (x = 0; x < nintrs; x++) {
1268 if (io_apic_ints[x].dst_apic_id == oldid)
1269 io_apic_ints[x].dst_apic_id = newid;
1270 else if (io_apic_ints[x].dst_apic_id == newid)
1271 io_apic_ints[x].dst_apic_id = oldid;
1274 /* Swap physical APIC IDs in IO_TO_ID mappings */
1275 for (oapic = 0; oapic < mp_napics; oapic++)
1276 if (IO_TO_ID(oapic) == newid)
1277 break;
1279 if (oapic < mp_napics) {
1280 kprintf("Changing APIC ID for IO APIC #%d from "
1281 "%d to %d in MP table\n",
1282 oapic, newid, oldid);
1283 IO_TO_ID(oapic) = oldid;
1285 IO_TO_ID(apic) = newid;
1289 static void
1290 fix_id_to_io_mapping(void)
1292 int x;
1294 for (x = 0; x < NAPICID; x++)
1295 ID_TO_IO(x) = -1;
1297 for (x = 0; x <= mp_naps; x++)
1298 if (CPU_TO_ID(x) < NAPICID)
1299 ID_TO_IO(CPU_TO_ID(x)) = x;
1301 for (x = 0; x < mp_napics; x++)
1302 if (IO_TO_ID(x) < NAPICID)
1303 ID_TO_IO(IO_TO_ID(x)) = x;
1307 static int
1308 first_free_apic_id(void)
1310 int freeid, x;
1312 for (freeid = 0; freeid < NAPICID; freeid++) {
1313 for (x = 0; x <= mp_naps; x++)
1314 if (CPU_TO_ID(x) == freeid)
1315 break;
1316 if (x <= mp_naps)
1317 continue;
1318 for (x = 0; x < mp_napics; x++)
1319 if (IO_TO_ID(x) == freeid)
1320 break;
1321 if (x < mp_napics)
1322 continue;
1323 return freeid;
1325 return freeid;
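/*
 * Falls through with freeid == NAPICID when every ID is taken; the caller
 * in mptable_fix() treats freeid >= NAPICID as "no free IDs" and panics.
 */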
1329 static int
1330 io_apic_id_acceptable(int apic, int id)
1332 int cpu; /* Logical CPU number */
1333 int oapic; /* Logical IO APIC number for other IO APIC */
1335 if (id >= NAPICID)
1336 return 0; /* Out of range */
1338 for (cpu = 0; cpu <= mp_naps; cpu++)
1339 if (CPU_TO_ID(cpu) == id)
1340 return 0; /* Conflict with CPU */
1342 for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++)
1343 if (IO_TO_ID(oapic) == id)
1344 return 0; /* Conflict with other APIC */
1346 return 1; /* ID is acceptable for IO APIC */
1349 static
1350 io_int *
1351 io_apic_find_int_entry(int apic, int pin)
1353 int x;
1355 /* search each of the possible INTerrupt sources */
1356 for (x = 0; x < nintrs; ++x) {
1357 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1358 (pin == io_apic_ints[x].dst_apic_int))
1359 return (&io_apic_ints[x]);
1361 return NULL;
1364 #endif
1367 * parse an Intel MP specification table
1369 static void
1370 mptable_fix(void)
1372 int x;
1373 #ifdef APIC_IO
1374 int id;
1375 int apic; /* IO APIC unit number */
1376 int freeid; /* Free physical APIC ID */
1377 int physid; /* Current physical IO APIC ID */
1378 io_int *io14;
1379 #endif
1380 int bus_0 = 0; /* Stop GCC warning */
1381 int bus_pci = 0; /* Stop GCC warning */
1382 int num_pci_bus;
1385 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
1386 * did it wrong. The MP spec says that when more than 1 PCI bus
1387 * exists the BIOS must begin with bus entries for the PCI bus and use
1388 * actual PCI bus numbering. This implies that when only 1 PCI bus
1389 * exists the BIOS can choose to ignore this ordering, and indeed many
1390 * MP motherboards do ignore it. This causes a problem when the PCI
1391 * sub-system makes requests of the MP sub-system based on PCI bus
1392 * numbers. So here we look for the situation and renumber the
1393 * busses and associated INTs in an effort to "make it right".
1396 /* find bus 0, PCI bus, count the number of PCI busses */
1397 for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
1398 if (bus_data[x].bus_id == 0) {
1399 bus_0 = x;
1401 if (bus_data[x].bus_type == PCI) {
1402 ++num_pci_bus;
1403 bus_pci = x;
1407 * bus_0 == slot of bus with ID of 0
1408 * bus_pci == slot of last PCI bus encountered
1411 /* check the 1 PCI bus case for sanity */
1412 /* if it is number 0 all is well */
1413 if (num_pci_bus == 1 &&
1414 bus_data[bus_pci].bus_id != 0) {
1416 /* mis-numbered, swap with whichever bus uses slot 0 */
1418 /* swap the bus entry types */
1419 bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
1420 bus_data[bus_0].bus_type = PCI;
1422 #ifdef APIC_IO
1423 /* swap each relevant INTerrupt entry */
1424 id = bus_data[bus_pci].bus_id;
1425 for (x = 0; x < nintrs; ++x) {
1426 if (io_apic_ints[x].src_bus_id == id) {
1427 io_apic_ints[x].src_bus_id = 0;
1429 else if (io_apic_ints[x].src_bus_id == 0) {
1430 io_apic_ints[x].src_bus_id = id;
1433 #endif
1436 #ifdef APIC_IO
1437 /* Assign IO APIC IDs.
1439 * First try the existing ID. If a conflict is detected, try
1440 * the ID in the MP table. If a conflict is still detected, find
1441 * a free id.
1443 * We cannot use the ID_TO_IO table before all conflicts have been
1444 * resolved and the table has been corrected.
1446 for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */
1448 /* First try to use the value set by the BIOS */
1449 physid = io_apic_get_id(apic);
1450 if (io_apic_id_acceptable(apic, physid)) {
1451 if (IO_TO_ID(apic) != physid)
1452 swap_apic_id(apic, IO_TO_ID(apic), physid);
1453 continue;
1456 /* Then check if the value in the MP table is acceptable */
1457 if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
1458 continue;
1460 /* Last resort, find a free APIC ID and use it */
1461 freeid = first_free_apic_id();
1462 if (freeid >= NAPICID)
1463 panic("No free physical APIC IDs found");
1465 if (io_apic_id_acceptable(apic, freeid)) {
1466 swap_apic_id(apic, IO_TO_ID(apic), freeid);
1467 continue;
1469 panic("Free physical APIC ID not usable");
1471 fix_id_to_io_mapping();
1472 #endif
1474 #ifdef APIC_IO
1475 /* detect and fix broken Compaq MP table */
1476 if (apic_int_type(0, 0) == -1) {
1477 kprintf("APIC_IO: MP table broken: 8259->APIC entry missing!\n");
1478 io_apic_ints[nintrs].int_type = 3; /* ExtInt */
1479 io_apic_ints[nintrs].int_vector = 0xff; /* Unassigned */
1480 /* XXX fixme, set src bus id etc, but it doesn't seem to hurt */
1481 io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0);
1482 io_apic_ints[nintrs].dst_apic_int = 0; /* Pin 0 */
1483 nintrs++;
1484 } else if (apic_int_type(0, 0) == 0) {
1485 kprintf("APIC_IO: MP table broken: ExtINT entry corrupt!\n");
1486 for (x = 0; x < nintrs; ++x)
1487 if ((0 == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1488 (0 == io_apic_ints[x].dst_apic_int)) {
1489 io_apic_ints[x].int_type = 3;
1490 io_apic_ints[x].int_vector = 0xff;
1491 break;
1496 * Fix missing IRQ 15 when IRQ 14 is an ISA interrupt. IDE
1497 * controllers universally come in pairs. If IRQ 14 is specified
1498 * as an ISA interrupt, then IRQ 15 had better be too.
1500 * [ Shuttle XPC / AMD Athlon X2 ]
1501 * The MPTable is missing an entry for IRQ 15. Note that the
1502 * ACPI table has an entry for both 14 and 15.
1504 if (apic_int_type(0, 14) == 0 && apic_int_type(0, 15) == -1) {
1505 kprintf("APIC_IO: MP table broken: IRQ 15 not ISA when IRQ 14 is!\n");
1506 io14 = io_apic_find_int_entry(0, 14);
1507 io_apic_ints[nintrs] = *io14;
1508 io_apic_ints[nintrs].src_bus_irq = 15;
1509 io_apic_ints[nintrs].dst_apic_int = 15;
1510 nintrs++;
1512 #endif
1515 #ifdef APIC_IO
1517 /* Assign low level interrupt handlers */
1518 static void
1519 setup_apic_irq_mapping(void)
1521 int x;
1522 int int_vector;
1524 /* Clear array */
1525 for (x = 0; x < APIC_INTMAPSIZE; x++) {
1526 int_to_apicintpin[x].ioapic = -1;
1527 int_to_apicintpin[x].int_pin = 0;
1528 int_to_apicintpin[x].apic_address = NULL;
1529 int_to_apicintpin[x].redirindex = 0;
1532 /* First assign ISA/EISA interrupts */
1533 for (x = 0; x < nintrs; x++) {
1534 int_vector = io_apic_ints[x].src_bus_irq;
1535 if (int_vector < APIC_INTMAPSIZE &&
1536 io_apic_ints[x].int_vector == 0xff &&
1537 int_to_apicintpin[int_vector].ioapic == -1 &&
1538 (apic_int_is_bus_type(x, ISA) ||
1539 apic_int_is_bus_type(x, EISA)) &&
1540 io_apic_ints[x].int_type == 0) {
1541 assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1542 io_apic_ints[x].dst_apic_int,
1543 int_vector);
1547 /* Assign ExtInt entry if no ISA/EISA interrupt 0 entry */
1548 for (x = 0; x < nintrs; x++) {
1549 if (io_apic_ints[x].dst_apic_int == 0 &&
1550 io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
1551 io_apic_ints[x].int_vector == 0xff &&
1552 int_to_apicintpin[0].ioapic == -1 &&
1553 io_apic_ints[x].int_type == 3) {
1554 assign_apic_irq(0, 0, 0);
1555 break;
1558 /* PCI interrupt assignment is deferred */
1561 #endif
1563 void
1564 mp_set_cpuids(int cpu_id, int apic_id)
1566 CPU_TO_ID(cpu_id) = apic_id;
1567 ID_TO_CPU(apic_id) = cpu_id;
1570 static int
1571 processor_entry(proc_entry_ptr entry, int cpu)
1573 KKASSERT(cpu > 0);
1575 /* check for usability */
1576 if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
1577 return 0;
1579 if(entry->apic_id >= NAPICID)
1580 panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
1581 /* check for BSP flag */
1582 if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
1583 mp_set_cpuids(0, entry->apic_id);
1584 return 0; /* it's already been counted */
1587 /* add another AP to list, if less than max number of CPUs */
1588 else if (cpu < MAXCPU) {
1589 mp_set_cpuids(cpu, entry->apic_id);
1590 return 1;
1593 return 0;
1597 static int
1598 bus_entry(bus_entry_ptr entry, int bus)
1600 int x;
1601 char c, name[8];
1603 /* encode the name into an index */
1604 for (x = 0; x < 6; ++x) {
1605 if ((c = entry->bus_type[x]) == ' ')
1606 break;
1607 name[x] = c;
1609 name[x] = '\0';
1611 if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
1612 panic("unknown bus type: '%s'", name);
1614 bus_data[bus].bus_id = entry->bus_id;
1615 bus_data[bus].bus_type = x;
1617 return 1;
1620 #ifdef APIC_IO
1622 static int
1623 io_apic_entry(io_apic_entry_ptr entry, int apic)
1625 if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
1626 return 0;
1628 IO_TO_ID(apic) = entry->apic_id;
1629 if (entry->apic_id < NAPICID)
1630 ID_TO_IO(entry->apic_id) = apic;
1632 return 1;
1635 #endif
1637 static int
1638 lookup_bus_type(char *name)
1640 int x;
1642 for (x = 0; x < MAX_BUSTYPE; ++x)
1643 if (strcmp(bus_type_table[x].name, name) == 0)
1644 return bus_type_table[x].type;
1646 return UNKNOWN_BUSTYPE;
1649 #ifdef APIC_IO
1651 static int
1652 int_entry(int_entry_ptr entry, int intr)
1654 int apic;
1656 io_apic_ints[intr].int_type = entry->int_type;
1657 io_apic_ints[intr].int_flags = entry->int_flags;
1658 io_apic_ints[intr].src_bus_id = entry->src_bus_id;
1659 io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
1660 if (entry->dst_apic_id == 255) {
1661 /* This signal goes to all IO APICs. Select an IO APIC
1662 with a sufficient number of interrupt pins */
1663 for (apic = 0; apic < mp_napics; apic++)
1664 if (((io_apic_read(apic, IOAPIC_VER) &
1665 IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
1666 entry->dst_apic_int)
1667 break;
1668 if (apic < mp_napics)
1669 io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
1670 else
1671 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1672 } else
1673 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1674 io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;
1676 return 1;
1679 static int
1680 apic_int_is_bus_type(int intr, int bus_type)
1682 int bus;
1684 for (bus = 0; bus < mp_nbusses; ++bus)
1685 if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
1686 && ((int) bus_data[bus].bus_type == bus_type))
1687 return 1;
1689 return 0;
1693 * Given a traditional ISA INT mask, return an APIC mask.
1695 u_int
1696 isa_apic_mask(u_int isa_mask)
1698 int isa_irq;
1699 int apic_pin;
1701 #if defined(SKIP_IRQ15_REDIRECT)
1702 if (isa_mask == (1 << 15)) {
1703 kprintf("skipping ISA IRQ15 redirect\n");
1704 return isa_mask;
1706 #endif /* SKIP_IRQ15_REDIRECT */
1708 isa_irq = ffs(isa_mask); /* find its bit position */
1709 if (isa_irq == 0) /* doesn't exist */
1710 return 0;
1711 --isa_irq; /* make it zero based */
1713 apic_pin = isa_apic_irq(isa_irq); /* look for APIC connection */
1714 if (apic_pin == -1)
1715 return 0;
1717 return (1 << apic_pin); /* convert pin# to a mask */
1721 * Determine which APIC pin an ISA/EISA INT is attached to.
1723 #define INTTYPE(I) (io_apic_ints[(I)].int_type)
1724 #define INTPIN(I) (io_apic_ints[(I)].dst_apic_int)
1725 #define INTIRQ(I) (io_apic_ints[(I)].int_vector)
1726 #define INTAPIC(I) (ID_TO_IO(io_apic_ints[(I)].dst_apic_id))
1728 #define SRCBUSIRQ(I) (io_apic_ints[(I)].src_bus_irq)
1730 isa_apic_irq(int isa_irq)
1732 int intr;
1734 for (intr = 0; intr < nintrs; ++intr) { /* check each record */
1735 if (INTTYPE(intr) == 0) { /* standard INT */
1736 if (SRCBUSIRQ(intr) == isa_irq) {
1737 if (apic_int_is_bus_type(intr, ISA) ||
1738 apic_int_is_bus_type(intr, EISA)) {
1739 if (INTIRQ(intr) == 0xff)
1740 return -1; /* unassigned */
1741 return INTIRQ(intr); /* found */
1746 return -1; /* NOT found */
1751 * Determine which APIC pin a PCI INT is attached to.
1753 #define SRCBUSID(I) (io_apic_ints[(I)].src_bus_id)
1754 #define SRCBUSDEVICE(I) ((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
1755 #define SRCBUSLINE(I) (io_apic_ints[(I)].src_bus_irq & 0x03)
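/*
 * For PCI interrupt sources the MP table packs the device number into bits
 * 2-6 of src_bus_irq and the INT line (INTA#..INTD#, 0-3) into bits 0-1,
 * which is what SRCBUSDEVICE() and SRCBUSLINE() extract.
 */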
1757 pci_apic_irq(int pciBus, int pciDevice, int pciInt)
1759 int intr;
1761 --pciInt; /* zero based */
1763 for (intr = 0; intr < nintrs; ++intr) { /* check each record */
1764 if ((INTTYPE(intr) == 0) /* standard INT */
1765 && (SRCBUSID(intr) == pciBus)
1766 && (SRCBUSDEVICE(intr) == pciDevice)
1767 && (SRCBUSLINE(intr) == pciInt)) { /* a candidate IRQ */
1768 if (apic_int_is_bus_type(intr, PCI)) {
1769 if (INTIRQ(intr) == 0xff)
1770 allocate_apic_irq(intr);
1771 if (INTIRQ(intr) == 0xff)
1772 return -1; /* unassigned */
1773 return INTIRQ(intr); /* exact match */
1778 return -1; /* NOT found */
1782 next_apic_irq(int irq)
1784 int intr, ointr;
1785 int bus, bustype;
1787 bus = 0;
1788 bustype = 0;
1789 for (intr = 0; intr < nintrs; intr++) {
1790 if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
1791 continue;
1792 bus = SRCBUSID(intr);
1793 bustype = apic_bus_type(bus);
1794 if (bustype != ISA &&
1795 bustype != EISA &&
1796 bustype != PCI)
1797 continue;
1798 break;
1800 if (intr >= nintrs) {
1801 return -1;
1803 for (ointr = intr + 1; ointr < nintrs; ointr++) {
1804 if (INTTYPE(ointr) != 0)
1805 continue;
1806 if (bus != SRCBUSID(ointr))
1807 continue;
1808 if (bustype == PCI) {
1809 if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
1810 continue;
1811 if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
1812 continue;
1814 if (bustype == ISA || bustype == EISA) {
1815 if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
1816 continue;
1818 if (INTPIN(intr) == INTPIN(ointr))
1819 continue;
1820 break;
1822 if (ointr >= nintrs) {
1823 return -1;
1825 return INTIRQ(ointr);
1827 #undef SRCBUSLINE
1828 #undef SRCBUSDEVICE
1829 #undef SRCBUSID
1830 #undef SRCBUSIRQ
1832 #undef INTPIN
1833 #undef INTIRQ
1834 #undef INTAPIC
1835 #undef INTTYPE
1837 #endif
1840 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
1842 * XXX FIXME:
1843 * Exactly what this means is unclear at this point. It is a solution
1844 * for motherboards that redirect the MBIRQ0 pin. Generically a motherboard
1845 * could route any of the ISA INTs to upper (>15) IRQ values. But most would
1846 * NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
1847 * option.
1850 undirect_isa_irq(int rirq)
1852 #if defined(READY)
1853 if (bootverbose)
1854 kprintf("Freeing redirected ISA irq %d.\n", rirq);
1855 /** FIXME: tickle the MB redirector chip */
1856 return /* XXX */;
1857 #else
1858 if (bootverbose)
1859 kprintf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
1860 return 0;
1861 #endif /* READY */
1866 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
1869 undirect_pci_irq(int rirq)
1871 #if defined(READY)
1872 if (bootverbose)
1873 kprintf("Freeing redirected PCI irq %d.\n", rirq);
1875 /** FIXME: tickle the MB redirector chip */
1876 return /* XXX */;
1877 #else
1878 if (bootverbose)
1879 kprintf("Freeing (NOT implemented) redirected PCI irq %d.\n",
1880 rirq);
1881 return 0;
1882 #endif /* READY */
1887 * given a bus ID, return:
1888 * the bus type if found
1889 * -1 if NOT found
1892 apic_bus_type(int id)
1894 int x;
1896 for (x = 0; x < mp_nbusses; ++x)
1897 if (bus_data[x].bus_id == id)
1898 return bus_data[x].bus_type;
1900 return -1;
1903 #ifdef APIC_IO
1906 * given a LOGICAL APIC# and pin#, return:
1907 * the associated src bus ID if found
1908 * -1 if NOT found
1911 apic_src_bus_id(int apic, int pin)
1913 int x;
1915 /* search each of the possible INTerrupt sources */
1916 for (x = 0; x < nintrs; ++x)
1917 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1918 (pin == io_apic_ints[x].dst_apic_int))
1919 return (io_apic_ints[x].src_bus_id);
1921 return -1; /* NOT found */
1925 * given a LOGICAL APIC# and pin#, return:
1926 * the associated src bus IRQ if found
1927 * -1 if NOT found
1930 apic_src_bus_irq(int apic, int pin)
1932 int x;
1934 for (x = 0; x < nintrs; x++)
1935 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1936 (pin == io_apic_ints[x].dst_apic_int))
1937 return (io_apic_ints[x].src_bus_irq);
1939 return -1; /* NOT found */
1944 * given a LOGICAL APIC# and pin#, return:
1945 * the associated INTerrupt type if found
1946 * -1 if NOT found
1949 apic_int_type(int apic, int pin)
1951 int x;
1953 /* search each of the possible INTerrupt sources */
1954 for (x = 0; x < nintrs; ++x) {
1955 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1956 (pin == io_apic_ints[x].dst_apic_int))
1957 return (io_apic_ints[x].int_type);
1959 return -1; /* NOT found */
1963 * Return the IRQ associated with an APIC pin
1965 int
1966 apic_irq(int apic, int pin)
1968 int x;
1969 int res;
1971 for (x = 0; x < nintrs; ++x) {
1972 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1973 (pin == io_apic_ints[x].dst_apic_int)) {
1974 res = io_apic_ints[x].int_vector;
1975 if (res == 0xff)
1976 return -1;
1977 if (apic != int_to_apicintpin[res].ioapic)
1978 panic("apic_irq: inconsistent table %d/%d", apic, int_to_apicintpin[res].ioapic);
1979 if (pin != int_to_apicintpin[res].int_pin)
1980 panic("apic_irq inconsistent table (2)");
1981 return res;
1984 return -1;
1989 * given a LOGICAL APIC# and pin#, return:
1990 * the associated trigger mode if found
1991 * -1 if NOT found
1994 apic_trigger(int apic, int pin)
1996 int x;
1998 /* search each of the possible INTerrupt sources */
1999 for (x = 0; x < nintrs; ++x)
2000 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
2001 (pin == io_apic_ints[x].dst_apic_int))
2002 return ((io_apic_ints[x].int_flags >> 2) & 0x03);
2004 return -1; /* NOT found */
2009 * given a LOGICAL APIC# and pin#, return:
2010 * the associated 'active' level if found
2011 * -1 if NOT found
2014 apic_polarity(int apic, int pin)
2016 int x;
2018 /* search each of the possible INTerrupt sources */
2019 for (x = 0; x < nintrs; ++x)
2020 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
2021 (pin == io_apic_ints[x].dst_apic_int))
2022 return (io_apic_ints[x].int_flags & 0x03);
2024 return -1; /* NOT found */
2027 #endif
2030 * set data according to MP defaults
2031 * FIXME: probably not complete yet...
2033 static void
2034 mptable_default(int type)
2036 int ap_cpu_id, boot_cpu_id;
2037 #if defined(APIC_IO)
2038 int io_apic_id;
2039 int pin;
2040 #endif /* APIC_IO */
2042 #if 0
2043 kprintf(" MP default config type: %d\n", type);
2044 switch (type) {
2045 case 1:
2046 kprintf(" bus: ISA, APIC: 82489DX\n");
2047 break;
2048 case 2:
2049 kprintf(" bus: EISA, APIC: 82489DX\n");
2050 break;
2051 case 3:
2052 kprintf(" bus: EISA, APIC: 82489DX\n");
2053 break;
2054 case 4:
2055 kprintf(" bus: MCA, APIC: 82489DX\n");
2056 break;
2057 case 5:
2058 kprintf(" bus: ISA+PCI, APIC: Integrated\n");
2059 break;
2060 case 6:
2061 kprintf(" bus: EISA+PCI, APIC: Integrated\n");
2062 break;
2063 case 7:
2064 kprintf(" bus: MCA+PCI, APIC: Integrated\n");
2065 break;
2066 default:
2067 kprintf(" future type\n");
2068 break;
2069 /* NOTREACHED */
2071 #endif /* 0 */
2073 boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24;
2074 ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;
2076 /* BSP */
2077 CPU_TO_ID(0) = boot_cpu_id;
2078 ID_TO_CPU(boot_cpu_id) = 0;
2080 /* one and only AP */
2081 CPU_TO_ID(1) = ap_cpu_id;
2082 ID_TO_CPU(ap_cpu_id) = 1;
2084 #if defined(APIC_IO)
2085 /* one and only IO APIC */
2086 io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
2089 * sanity check, refer to MP spec section 3.6.6, last paragraph
2090 * necessary as some hardware isn't properly setting up the IO APIC
2092 #if defined(REALLY_ANAL_IOAPICID_VALUE)
2093 if (io_apic_id != 2) {
2094 #else
2095 if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
2096 #endif /* REALLY_ANAL_IOAPICID_VALUE */
2097 io_apic_set_id(0, 2);
2098 io_apic_id = 2;
2100 IO_TO_ID(0) = io_apic_id;
2101 ID_TO_IO(io_apic_id) = 0;
2102 #endif /* APIC_IO */
2104 /* fill out bus entries */
2105 switch (type) {
2106 case 1:
2107 case 2:
2108 case 3:
2109 case 4:
2110 case 5:
2111 case 6:
2112 case 7:
2113 bus_data[0].bus_id = default_data[type - 1][1];
2114 bus_data[0].bus_type = default_data[type - 1][2];
2115 bus_data[1].bus_id = default_data[type - 1][3];
2116 bus_data[1].bus_type = default_data[type - 1][4];
2117 break;
2119 /* case 4: case 7: MCA NOT supported */
2120 default: /* illegal/reserved */
2121 panic("BAD default MP config: %d", type);
2122 /* NOTREACHED */
2125 #if defined(APIC_IO)
2126 /* general cases from MP v1.4, table 5-2 */
2127 for (pin = 0; pin < 16; ++pin) {
2128 io_apic_ints[pin].int_type = 0;
2129 io_apic_ints[pin].int_flags = 0x05; /* edge/active-hi */
2130 io_apic_ints[pin].src_bus_id = 0;
2131 io_apic_ints[pin].src_bus_irq = pin; /* IRQ2 caught below */
2132 io_apic_ints[pin].dst_apic_id = io_apic_id;
2133 io_apic_ints[pin].dst_apic_int = pin; /* 1-to-1 */
2136 /* special cases from MP v1.4, table 5-2 */
2137 if (type == 2) {
2138 io_apic_ints[2].int_type = 0xff; /* N/C */
2139 io_apic_ints[13].int_type = 0xff; /* N/C */
2140 #if !defined(APIC_MIXED_MODE)
2141 /** FIXME: ??? */
2142 panic("sorry, can't support type 2 default yet");
2143 #endif /* APIC_MIXED_MODE */
2145 else
2146 io_apic_ints[2].src_bus_irq = 0; /* ISA IRQ0 is on APIC INT 2 */
2148 if (type == 7)
2149 io_apic_ints[0].int_type = 0xff; /* N/C */
2150 else
2151 io_apic_ints[0].int_type = 3; /* vectored 8259 */
2152 #endif /* APIC_IO */
2156 * Map a physical memory address representing I/O into KVA. The I/O
2157 * block is assumed not to cross a page boundary.
2159 void *
2160 permanent_io_mapping(vm_paddr_t pa)
2162 vm_offset_t vaddr;
2163 int pgeflag;
2164 int i;
2166 KKASSERT(pa < 0x100000000LL);
2168 pgeflag = 0; /* not used for SMP yet */
2171 * If the requested physical address has already been incidentally
2172 * mapped, just use the existing mapping. Otherwise create a new
2173 * mapping.
2175 for (i = IO_MAPPING_START_INDEX; i < SMPpt_alloc_index; ++i) {
2176 if (((vm_offset_t)SMPpt[i] & PG_FRAME) ==
2177 ((vm_offset_t)pa & PG_FRAME)) {
2178 break;
2181 if (i == SMPpt_alloc_index) {
2182 if (i == NPTEPG - 2) {
2183 panic("permanent_io_mapping: We ran out of space"
2184 " in SMPpt[]!");
2186 SMPpt[i] = (pt_entry_t)(PG_V | PG_RW | pgeflag |
2187 ((vm_offset_t)pa & PG_FRAME));
2188 ++SMPpt_alloc_index;
2190 vaddr = (vm_offset_t)CPU_prvspace + (i * PAGE_SIZE) +
2191 ((vm_offset_t)pa & PAGE_MASK);
2192 return ((void *)vaddr);
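/*
 * The returned KVA is relative to CPU_prvspace because SMPpt[] is the page
 * table page backing that region: entry i of SMPpt[] maps the page at
 * CPU_prvspace + i * PAGE_SIZE.
 */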
2196 * start each AP in our list
2198 static int
2199 start_all_aps(u_int boot_addr)
2201 int x, i, pg;
2202 int shift;
2203 u_char mpbiosreason;
2204 u_long mpbioswarmvec;
2205 struct mdglobaldata *gd;
2206 struct privatespace *ps;
2207 char *stack;
2208 uintptr_t kptbase;
2210 POSTCODE(START_ALL_APS_POST);
2212 /* Initialize BSP's local APIC */
2213 apic_initialize(TRUE);
2215 /* install the AP 1st level boot code */
2216 install_ap_tramp(boot_addr);
2219 /* save the current value of the warm-start vector */
2220 mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
2221 outb(CMOS_REG, BIOS_RESET);
2222 mpbiosreason = inb(CMOS_DATA);
2224 /* set up temporary P==V mapping for AP boot */
2225 /* XXX this is a hack, we should boot the AP on its own stack/PTD */
2226 kptbase = (uintptr_t)(void *)KPTphys;
2227 for (x = 0; x < NKPT; x++) {
2228 PTD[x] = (pd_entry_t)(PG_V | PG_RW |
2229 ((kptbase + x * PAGE_SIZE) & PG_FRAME));
2231 cpu_invltlb();

	/* start each AP */
	for (x = 1; x <= mp_naps; ++x) {

		/* This is a bit verbose, it will go away soon.  */

		/* first page of AP's private space */
		pg = x * i386_btop(sizeof(struct privatespace));

		/* allocate new private data page(s) */
		gd = (struct mdglobaldata *)kmem_alloc(&kernel_map,
				MDGLOBALDATA_BASEALLOC_SIZE);
		/* wire it into the private page table page */
		for (i = 0; i < MDGLOBALDATA_BASEALLOC_SIZE; i += PAGE_SIZE) {
			SMPpt[pg + i / PAGE_SIZE] = (pt_entry_t)
			    (PG_V | PG_RW | vtophys_pte((char *)gd + i));
		}
		pg += MDGLOBALDATA_BASEALLOC_PAGES;

		SMPpt[pg + 0] = 0;		/* *gd_CMAP1 */
		SMPpt[pg + 1] = 0;		/* *gd_CMAP2 */
		SMPpt[pg + 2] = 0;		/* *gd_CMAP3 */
		SMPpt[pg + 3] = 0;		/* *gd_PMAP1 */

		/* allocate and set up an idle stack data page */
		stack = (char *)kmem_alloc(&kernel_map, UPAGES*PAGE_SIZE);
		for (i = 0; i < UPAGES; i++) {
			SMPpt[pg + 4 + i] = (pt_entry_t)
			    (PG_V | PG_RW | vtophys_pte(PAGE_SIZE * i + stack));
		}

		gd = &CPU_prvspace[x].mdglobaldata;	/* official location */
		bzero(gd, sizeof(*gd));
		gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);
		gd->gd_CMAP1 = &SMPpt[pg + 0];
		gd->gd_CMAP2 = &SMPpt[pg + 1];
		gd->gd_CMAP3 = &SMPpt[pg + 2];
		gd->gd_PMAP1 = &SMPpt[pg + 3];
		gd->gd_CADDR1 = ps->CPAGE1;
		gd->gd_CADDR2 = ps->CPAGE2;
		gd->gd_CADDR3 = ps->CPAGE3;
		gd->gd_PADDR1 = (unsigned *)ps->PPAGE1;
		gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * (mp_naps + 1));
		bzero(gd->mi.gd_ipiq, sizeof(lwkt_ipiq) * (mp_naps + 1));

		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
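
		/*
		 * The 40:67 warm-start vector and CMOS shutdown status
		 * 0x0A set above make the BIOS skip POST and jump through
		 * that vector after an INIT-style reset.  This is the
		 * older way of kicking an AP, needed for external
		 * 82489DX-style APICs; CPUs with integrated local APICs
		 * are actually started by the INIT/STARTUP IPI sequence
		 * in start_ap() below.
		 */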

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
		bootAP = x;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(gd, boot_addr)) {
			kprintf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			kprintf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}
		CHECK_PRINT("trace");	/* show checkpoints */

		/* record its version info */
		cpu_apic_versions[x] = cpu_apic_versions[0];
	}

	/* set ncpus to 1 + highest logical cpu.  Not all may have come up */
	ncpus = x;

	/* ncpus2 -- ncpus rounded down to the nearest power of 2 */
	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;
	ncpus2_shift = shift;
	ncpus2 = 1 << shift;
	ncpus2_mask = ncpus2 - 1;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;
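
	/*
	 * e.g. with ncpus == 6: ncpus2 == 4 (ncpus2_shift == 2,
	 * ncpus2_mask == 3) and ncpus_fit == 8 (ncpus_fit_mask == 7).
	 * When ncpus is already a power of 2 the two values are equal.
	 */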

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask & ~(1 << mycpu->gd_cpuid);
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * ncpus);
	bzero(mycpu->gd_ipiq, sizeof(lwkt_ipiq) * ncpus);

	/* fill in our (BSP) APIC version */
	cpu_apic_versions[0] = lapic.version;

	/* restore the warmstart vector */
	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/*
	 * NOTE!  The idlestack for the BSP was set up by locore.  Finish
	 * up, clean out the P==V mapping we did earlier.
	 */
	for (x = 0; x < NKPT; x++)
		PTD[x] = 0;
	pmap_set_opt();

	/* number of APs actually started */
	return ncpus - 1;
}

/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

static void
install_ap_tramp(u_int boot_addr)
{
	int	x;
	int	size = *(int *) ((u_long) & bootMP_size);
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) boot_addr + KERNBASE;
	u_int	boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	POSTCODE(INSTALL_AP_TRAMP_POST);

	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem. unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) boot_addr + KERNBASE;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
}
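
/*
 * The 16-bit/8-bit split writes above appear to patch the base-address
 * bytes (base 0-15 and base 16-23) of the temporary boot code and boot
 * data descriptors in MP_GDT, pointing both segments at boot_addr so
 * the real-mode trampoline can address itself.  See mpboot.s for the
 * exact layout.
 */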

/*
 * This function starts the AP (application processor) identified by the
 * physical APIC ID derived from 'gd'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It ain't pretty,
 * but it seems to work.
 *
 * NOTE: eventually an AP gets to ap_init(), which is called just
 *	 before the AP goes into the LWKT scheduler's idle loop.
 */
static int
start_ap(struct mdglobaldata *gd, u_int boot_addr)
{
	int	physical_cpu;
	int	vector;
	u_long	icr_lo, icr_hi;

	POSTCODE(START_AP_POST);

	/* get the PHYSICAL APIC ID# */
	physical_cpu = CPU_TO_ID(gd->mi.gd_cpuid);

	/* calculate the vector */
	vector = (boot_addr >> 12) & 0xff;

	/* Make sure the target cpu sees everything */
	wbinvd();

	/*
	 * First we do an INIT/RESET IPI.  This INIT IPI might be run,
	 * resetting and running the target CPU.  OR this INIT IPI might
	 * be latched (P5 bug), the CPU waiting for a STARTUP IPI.  OR
	 * this INIT IPI might be ignored.
	 */
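
	/*
	 * ICR encodings used below (see apicreg.h):
	 *	0x0000c500  INIT, level triggered, level asserted
	 *	0x00008500  INIT, level triggered, level deasserted
	 *	0x00000600  STARTUP (SIPI); the low byte carries the vector,
	 *		    so the AP begins real-mode execution at
	 *		    vector << 12, i.e. at boot_addr.
	 */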

	/* setup the address for the target AP */
	icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
	icr_hi |= (physical_cpu << 24);
	lapic.icr_hi = icr_hi;

	/* do an INIT IPI: assert RESET */
	icr_lo = lapic.icr_lo & 0xfff00000;
	lapic.icr_lo = icr_lo | 0x0000c500;

	/* wait for pending status end */
	while (lapic.icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;

	/* do an INIT IPI: deassert RESET */
	lapic.icr_lo = icr_lo | 0x00008500;

	/* wait for pending status end */
	u_sleep(10000);		/* wait ~10mS */
	while (lapic.icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this 1st STARTUP would terminate
	 * immediately and the previously started INIT IPI would continue.
	 * OR the previous INIT IPI has already run, and this STARTUP IPI
	 * will run.  OR the previous INIT IPI was ignored, and this
	 * STARTUP IPI will run.
	 */

	/* do a STARTUP IPI */
	lapic.icr_lo = icr_lo | 0x00000600 | vector;
	while (lapic.icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;
	u_sleep(200);		/* wait ~200uS */

	/*
	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should
	 * run IF the previous STARTUP IPI was cancelled by a latched INIT
	 * IPI.  OR this STARTUP IPI will be ignored, as only ONE STARTUP
	 * IPI is recognized after hardware RESET or INIT IPI.
	 */
	lapic.icr_lo = icr_lo | 0x00000600 | vector;
	while (lapic.icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;
	u_sleep(200);		/* wait ~200uS */

	/* wait for it to start, see ap_init() */
	set_apic_timer(5000000);	/* == 5 seconds */
	while (read_apic_timer()) {
		if (smp_startup_mask & (1 << gd->mi.gd_cpuid))
			return 1;	/* return SUCCESS */
	}
	return 0;		/* return FAILURE */
}

/*
 * Lazy flush the TLB on all other CPU's.  DEPRECATED.
 *
 * If for some reason we were unable to start all cpus we cannot safely
 * use broadcast IPIs.
 */
void
smp_invltlb(void)
{
#ifdef SMP
	if (smp_startup_mask == smp_active_mask) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		selected_apic_ipi(smp_active_mask, XINVLTLB_OFFSET,
			APIC_DELMODE_FIXED);
	}
#endif
}
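
/*
 * Typical caller (illustrative): the pmap code invalidates the local TLB
 * and then shoots down the other cpus after changing a shared kernel
 * mapping, roughly:
 *
 *	*pte = newpte;
 *	cpu_invltlb();
 *	smp_invltlb();
 */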

/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *            from executing at same time.
 */
int
stop_cpus(u_int map)
{
	map &= smp_active_mask;

	/* send the Xcpustop IPI to all CPUs in map */
	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);

	while ((stopped_cpus & map) != map)
		/* spin */ ;

	return 1;
}

/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(u_int map)
{
	/* signal other cpus to restart */
	started_cpus = map & smp_active_mask;

	while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */
		/* spin */ ;

	return 1;
}
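
/*
 * Example pairing (illustrative), in the style of debugger or panic code:
 *
 *	stop_cpus(mycpu->gd_other_cpus);
 *	...
 *	restart_cpus(stopped_cpus);
 */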

/*
 * This is called once the mpboot code has gotten us properly relocated
 * and the MMU turned on, etc.   ap_init() is actually the idle thread,
 * and when it returns the scheduler will call the real cpu_idle() main
 * loop for the idlethread.  Interrupts are disabled on entry and should
 * remain disabled at return.
 */
void
ap_init(void)
{
	u_int	apic_id;

	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	smp_startup_mask |= 1 << mycpu->gd_cpuid;
	cpu_mfence();

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: We have to synchronize td_mpcount to our desired MP state
	 * before calling cpu_try_mplock().
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish == 0)
		cpu_lfence();
	++curthread->td_mpcount;
	while (cpu_try_mplock() == 0)
		;

	if (cpu_feature & CPUID_TSC) {
		/*
		 * The BSP is constantly updating tsc0_offset, figure out
		 * the relative difference to synchronize ktrdump.
		 */
		tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
	lidt(&r_idt);
#endif

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask & ~(1 << mycpu->gd_cpuid);

	kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);

	/* A quick check from sanity claus */
	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
	if (mycpu->gd_cpuid != apic_id) {
		kprintf("SMP: cpuid = %d\n", mycpu->gd_cpuid);
		kprintf("SMP: apic_id = %d\n", apic_id);
		kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize AP's local APIC for irq's */
	apic_initialize(FALSE);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up, this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done put it there.
	 */
	KKASSERT(curthread->td_mpcount == 1);
	smp_active_mask |= 1 << mycpu->gd_cpuid;

	/*
	 * Enable interrupts here.  idle_restore will also do it, but
	 * doing it here lets us clean up any strays that got posted to
	 * the CPU during the AP boot while we are still in a critical
	 * section.
	 */
	__asm __volatile("sti; pause; pause"::);
	mdcpu->gd_fpending = 0;
	mdcpu->gd_ipending = 0;

	initclocks_pcpu();	/* clock interrupts (via IPIs) */
	lwkt_process_ipiq();

	/*
	 * Releasing the mp lock lets the BSP finish up the SMP init
	 */
	rel_mplock();
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	mp_finish = 1;
	if (bootverbose)
		kprintf("Finish MP startup\n");
	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
	tsc_offsets[0] = 0;
	rel_mplock();
	while (smp_active_mask != smp_startup_mask) {
		cpu_lfence();
		if (cpu_feature & CPUID_TSC)
			tsc0_offset = rdtsc();
	}
	while (try_mplock() == 0)
		;
	if (bootverbose)
		kprintf("Active CPU Mask: %08x\n", smp_active_mask);
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL)

void
cpu_send_ipiq(int dcpu)
{
	if ((1 << dcpu) & smp_active_mask)
		single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
}

#if 0	/* single_apic_ipi_passive() not working yet */
/*
 * Returns 0 on failure, 1 on success
 */
int
cpu_send_ipiq_passive(int dcpu)
{
	int r = 0;
	if ((1 << dcpu) & smp_active_mask) {
		r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
					APIC_DELMODE_FIXED);
	}
	return(r);
}
#endif