mptable_lapic_enumerate(): Fix comment
[dragonfly.git] / sys / platform / pc32 / i386 / mp_machdep.c
1 /*
2 * Copyright (c) 1996, by Steve Passe
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
25 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
26 * $DragonFly: src/sys/platform/pc32/i386/mp_machdep.c,v 1.60 2008/06/07 12:03:52 mneumann Exp $
29 #include "opt_cpu.h"
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/sysctl.h>
35 #include <sys/malloc.h>
36 #include <sys/memrange.h>
37 #include <sys/cons.h> /* cngetc() */
38 #include <sys/machintr.h>
40 #include <vm/vm.h>
41 #include <vm/vm_param.h>
42 #include <vm/pmap.h>
43 #include <vm/vm_kern.h>
44 #include <vm/vm_extern.h>
45 #include <sys/lock.h>
46 #include <vm/vm_map.h>
47 #include <sys/user.h>
48 #ifdef GPROF
49 #include <sys/gmon.h>
50 #endif
52 #include <machine/smp.h>
53 #include <machine_base/apic/apicreg.h>
54 #include <machine/atomic.h>
55 #include <machine/cpufunc.h>
56 #include <machine_base/apic/mpapic.h>
57 #include <machine/psl.h>
58 #include <machine/segments.h>
59 #include <machine/tss.h>
60 #include <machine/specialreg.h>
61 #include <machine/globaldata.h>
63 #include <machine/md_var.h> /* setidt() */
64 #include <machine_base/icu/icu.h> /* IPIs */
65 #include <machine_base/isa/intr_machdep.h> /* IPIs */
67 #define FIXUP_EXTRA_APIC_INTS 8 /* additional entries we may create */
69 #define WARMBOOT_TARGET 0
70 #define WARMBOOT_OFF (KERNBASE + 0x0467)
71 #define WARMBOOT_SEG (KERNBASE + 0x0469)
73 #define BIOS_BASE (0xf0000)
74 #define BIOS_SIZE (0x10000)
75 #define BIOS_COUNT (BIOS_SIZE/4)
77 #define CMOS_REG (0x70)
78 #define CMOS_DATA (0x71)
79 #define BIOS_RESET (0x0f)
80 #define BIOS_WARM (0x0a)
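/*
 * Warm-boot hand-off used by start_all_aps(): the trampoline's real-mode
 * segment:offset is stored at 0040:0067 (WARMBOOT_OFF/WARMBOOT_SEG) and
 * BIOS_WARM (0x0a) is written to CMOS shutdown-status register 0x0f
 * (BIOS_RESET), so the BIOS performs a warm start through that vector
 * instead of a full cold-boot POST.
 */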
82 #define PROCENTRY_FLAG_EN 0x01
83 #define PROCENTRY_FLAG_BP 0x02
84 #define IOAPICENTRY_FLAG_EN 0x01
87 /* MP Floating Pointer Structure */
88 typedef struct MPFPS {
89 char signature[4];
90 u_int32_t pap;
91 u_char length;
92 u_char spec_rev;
93 u_char checksum;
94 u_char mpfb1;
95 u_char mpfb2;
96 u_char mpfb3;
97 u_char mpfb4;
98 u_char mpfb5;
99 } *mpfps_t;
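/*
 * Field usage (MP spec 1.4): 'pap' is the physical address of the MP
 * configuration table (0 if none); 'mpfb1' selects one of the spec's
 * default configurations when non-zero; bit 7 of 'mpfb2' indicates that
 * an IMCR is present (PIC vs. virtual-wire mode), tested below when
 * setting MACHINTR_VAR_IMCR_PRESENT.
 */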
101 /* MP Configuration Table Header */
102 typedef struct MPCTH {
103 char signature[4];
104 u_short base_table_length;
105 u_char spec_rev;
106 u_char checksum;
107 u_char oem_id[8];
108 u_char product_id[12];
109 void *oem_table_pointer;
110 u_short oem_table_size;
111 u_short entry_count;
112 void *apic_address;
113 u_short extended_table_length;
114 u_char extended_table_checksum;
115 u_char reserved;
116 } *mpcth_t;
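/*
 * The variable-length base table ('entry_count' entries, 'base_table_length'
 * bytes including this header) immediately follows the header; it is walked
 * by mptable_iterate_entries() and the pass1/pass2 scanners below.
 */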
119 typedef struct PROCENTRY {
120 u_char type;
121 u_char apic_id;
122 u_char apic_version;
123 u_char cpu_flags;
124 u_long cpu_signature;
125 u_long feature_flags;
126 u_long reserved1;
127 u_long reserved2;
128 } *proc_entry_ptr;
130 typedef struct BUSENTRY {
131 u_char type;
132 u_char bus_id;
133 char bus_type[6];
134 } *bus_entry_ptr;
136 typedef struct IOAPICENTRY {
137 u_char type;
138 u_char apic_id;
139 u_char apic_version;
140 u_char apic_flags;
141 void *apic_address;
142 } *io_apic_entry_ptr;
144 typedef struct INTENTRY {
145 u_char type;
146 u_char int_type;
147 u_short int_flags;
148 u_char src_bus_id;
149 u_char src_bus_irq;
150 u_char dst_apic_id;
151 u_char dst_apic_int;
152 } *int_entry_ptr;
154 /* descriptions of MP basetable entries */
155 typedef struct BASETABLE_ENTRY {
156 u_char type;
157 u_char length;
158 char name[16];
159 } basetable_entry;
161 struct mptable_pos {
162 mpfps_t mp_fps;
163 mpcth_t mp_cth;
164 vm_size_t mp_cth_mapsz;
167 typedef int (*mptable_iter_func)(void *, const void *, int);
170 * this code MUST be enabled here and in mpboot.s.
171 * it follows the very early stages of AP boot by placing values in CMOS ram.
172 * it NORMALLY will never be needed, hence the primitive method for enabling it.
175 #if defined(CHECK_POINTS)
176 #define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA))
177 #define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
179 #define CHECK_INIT(D); \
180 CHECK_WRITE(0x34, (D)); \
181 CHECK_WRITE(0x35, (D)); \
182 CHECK_WRITE(0x36, (D)); \
183 CHECK_WRITE(0x37, (D)); \
184 CHECK_WRITE(0x38, (D)); \
185 CHECK_WRITE(0x39, (D));
187 #define CHECK_PRINT(S); \
188 kprintf("%s: %d, %d, %d, %d, %d, %d\n", \
189 (S), \
190 CHECK_READ(0x34), \
191 CHECK_READ(0x35), \
192 CHECK_READ(0x36), \
193 CHECK_READ(0x37), \
194 CHECK_READ(0x38), \
195 CHECK_READ(0x39));
197 #else /* CHECK_POINTS */
199 #define CHECK_INIT(D)
200 #define CHECK_PRINT(S)
202 #endif /* CHECK_POINTS */
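/*
 * Usage: start_all_aps() calls CHECK_INIT(99) before kicking an AP and
 * CHECK_PRINT("trace") afterwards, so a wedged AP leaves a progress trail
 * in CMOS locations 0x34-0x39.
 */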
205 * Values to send to the POST hardware.
207 #define MP_BOOTADDRESS_POST 0x10
208 #define MP_PROBE_POST 0x11
209 #define MPTABLE_PASS1_POST 0x12
211 #define MP_START_POST 0x13
212 #define MP_ENABLE_POST 0x14
213 #define MPTABLE_PASS2_POST 0x15
215 #define START_ALL_APS_POST 0x16
216 #define INSTALL_AP_TRAMP_POST 0x17
217 #define START_AP_POST 0x18
219 #define MP_ANNOUNCE_POST 0x19
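/*
 * Progress milestones recorded via POSTCODE() (defined elsewhere) in
 * current_postcode; typically they are also written to the 0x80 diagnostic
 * POST port so boot progress can be watched on a POST card.
 */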
221 static int need_hyperthreading_fixup;
222 static u_int logical_cpus;
223 u_int logical_cpus_mask;
225 static int madt_probe_test;
226 TUNABLE_INT("hw.madt_probe_test", &madt_probe_test);
228 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
229 int current_postcode;
231 /** XXX FIXME: what system files declare these??? */
232 extern struct region_descriptor r_gdt, r_idt;
234 int mp_naps; /* # of Application Processors */
235 #ifdef APIC_IO
236 static int mp_nbusses; /* # of busses */
237 int mp_napics; /* # of IO APICs */
238 #endif
239 static vm_offset_t cpu_apic_address;
240 #ifdef APIC_IO
241 vm_offset_t io_apic_address[NAPICID]; /* NAPICID is more than enough */
242 u_int32_t *io_apic_versions;
243 #endif
244 extern int nkpt;
246 u_int32_t cpu_apic_versions[MAXCPU];
247 int64_t tsc0_offset;
248 extern int64_t tsc_offsets[];
250 extern u_long ebda_addr;
252 #ifdef APIC_IO
253 struct apic_intmapinfo int_to_apicintpin[APIC_INTMAPSIZE];
254 #endif
257 * APIC ID logical/physical mapping structures.
258 * We oversize these to simplify boot-time config.
260 int cpu_num_to_apic_id[NAPICID];
261 #ifdef APIC_IO
262 int io_num_to_apic_id[NAPICID];
263 #endif
264 int apic_id_to_logical[NAPICID];
266 /* AP uses this during bootstrap. Do not staticize. */
267 char *bootSTK;
268 static int bootAP;
270 /* Hotwire a 0->4MB V==P mapping */
271 extern pt_entry_t *KPTphys;
274 * SMP page table page. Set up by locore to point to a page table
275 * page from which we allocate per-cpu privatespace areas, io_apics,
276 * and so forth.
279 #define IO_MAPPING_START_INDEX \
280 (SMP_MAXCPU * sizeof(struct privatespace) / PAGE_SIZE)
282 extern pt_entry_t *SMPpt;
283 static int SMPpt_alloc_index = IO_MAPPING_START_INDEX;
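/*
 * SMPpt[] layout: entries 0 .. IO_MAPPING_START_INDEX-1 map the per-cpu
 * privatespace pages (wired up in start_all_aps()), while entries from
 * IO_MAPPING_START_INDEX onward are handed out sequentially by
 * permanent_io_mapping() for local/IO APIC register windows.
 */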
285 struct pcb stoppcbs[MAXCPU];
287 static basetable_entry basetable_entry_types[] =
289 {0, 20, "Processor"},
290 {1, 8, "Bus"},
291 {2, 8, "I/O APIC"},
292 {3, 8, "I/O INT"},
293 {4, 8, "Local INT"}
297 * Local data and functions.
300 static u_int boot_address;
301 static u_int base_memory;
302 static int mp_finish;
304 static void mp_enable(u_int boot_addr);
306 static int mptable_iterate_entries(const mpcth_t,
307 mptable_iter_func, void *);
308 static int mptable_probe(void);
309 static int mptable_check(vm_paddr_t);
310 static int mptable_search_sig(u_int32_t target, int count);
311 static void mptable_hyperthread_fixup(u_int id_mask);
312 static void mptable_pass1(struct mptable_pos *);
313 static int mptable_pass2(struct mptable_pos *);
314 static void mptable_default(int type);
315 static void mptable_fix(void);
316 static int mptable_map(struct mptable_pos *, vm_paddr_t);
317 static void mptable_unmap(struct mptable_pos *);
318 static void mptable_lapic_enumerate(struct mptable_pos *);
319 static void mptable_lapic_default(void);
321 #ifdef APIC_IO
322 static void setup_apic_irq_mapping(void);
323 static int apic_int_is_bus_type(int intr, int bus_type);
324 #endif
325 static int start_all_aps(u_int boot_addr);
326 static void install_ap_tramp(u_int boot_addr);
327 static int start_ap(struct mdglobaldata *gd, u_int boot_addr);
328 static void lapic_init(vm_offset_t);
330 static cpumask_t smp_startup_mask = 1; /* which cpus have been started */
331 cpumask_t smp_active_mask = 1; /* which cpus are ready for IPIs etc? */
332 SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RD, &smp_active_mask, 0, "");
335 * Calculate usable address in base memory for AP trampoline code.
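 * For example, with 639KB of base memory (0x9fc00) the address first rounds
 * down to 0x9f000; if the remaining 0xc00 bytes cannot hold bootMP_size the
 * target drops another 4KB to 0x9e000.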
337 u_int
338 mp_bootaddress(u_int basemem)
340 POSTCODE(MP_BOOTADDRESS_POST);
342 base_memory = basemem;
344 boot_address = base_memory & ~0xfff; /* round down to 4k boundary */
345 if ((base_memory - boot_address) < bootMP_size)
346 boot_address -= 4096; /* not enough, lower by 4k */
348 return boot_address;
353 * Look for an Intel MP spec table (ie, SMP capable hardware).
355 static int
356 mptable_probe(void)
358 int x;
359 u_int32_t target;
362 * Make sure our SMPpt[] page table is big enough to hold all the
363 * mappings we need.
365 KKASSERT(IO_MAPPING_START_INDEX < NPTEPG - 2);
367 POSTCODE(MP_PROBE_POST);
369 /* see if EBDA exists */
370 if (ebda_addr != 0) {
371 /* search first 1K of EBDA */
372 target = (u_int32_t)ebda_addr;
373 if ((x = mptable_search_sig(target, 1024 / 4)) > 0)
374 return x;
375 } else {
376 /* last 1K of base memory, effective 'top of base' passed in */
377 target = (u_int32_t)(base_memory - 0x400);
378 if ((x = mptable_search_sig(target, 1024 / 4)) > 0)
379 return x;
382 /* search the BIOS */
383 target = (u_int32_t)BIOS_BASE;
384 if ((x = mptable_search_sig(target, BIOS_COUNT)) > 0)
385 return x;
387 /* nothing found */
388 return 0;
391 struct mptable_check_cbarg {
392 int cpu_count;
393 int found_bsp;
396 static int
397 mptable_check_callback(void *xarg, const void *pos, int type)
399 const struct PROCENTRY *ent;
400 struct mptable_check_cbarg *arg = xarg;
402 if (type != 0)
403 return 0;
404 ent = pos;
406 if ((ent->cpu_flags & PROCENTRY_FLAG_EN) == 0)
407 return 0;
408 arg->cpu_count++;
410 if (ent->cpu_flags & PROCENTRY_FLAG_BP) {
411 if (arg->found_bsp) {
412 kprintf("more than one BSP in base MP table\n");
413 return EINVAL;
415 arg->found_bsp = 1;
417 return 0;
420 static int
421 mptable_check(vm_paddr_t mpfps_paddr)
423 struct mptable_pos mpt;
424 struct mptable_check_cbarg arg;
425 mpcth_t cth;
426 int error;
428 if (mpfps_paddr == 0)
429 return EOPNOTSUPP;
431 error = mptable_map(&mpt, mpfps_paddr);
432 if (error)
433 return error;
435 if (mpt.mp_fps->mpfb1 != 0)
436 goto done;
438 error = EINVAL;
440 cth = mpt.mp_cth;
441 if (cth == NULL)
442 goto done;
443 if (cth->apic_address == 0)
444 goto done;
446 bzero(&arg, sizeof(arg));
447 error = mptable_iterate_entries(cth, mptable_check_callback, &arg);
448 if (!error) {
449 if (arg.cpu_count == 0) {
450 kprintf("MP table contains no processor entries\n");
451 error = EINVAL;
452 } else if (!arg.found_bsp) {
453 kprintf("MP table does not contains BSP entry\n");
454 error = EINVAL;
457 done:
458 mptable_unmap(&mpt);
459 return error;
462 static int
463 mptable_iterate_entries(const mpcth_t cth, mptable_iter_func func, void *arg)
465 int count, total_size;
466 const void *position;
468 KKASSERT(cth->base_table_length >= sizeof(struct MPCTH));
469 total_size = cth->base_table_length - sizeof(struct MPCTH);
470 position = (const uint8_t *)cth + sizeof(struct MPCTH);
471 count = cth->entry_count;
473 while (count--) {
474 int type, error;
476 KKASSERT(total_size >= 0);
477 if (total_size == 0) {
478 kprintf("invalid base MP table, "
479 "entry count and length mismatch\n");
480 return EINVAL;
483 type = *(const uint8_t *)position;
484 switch (type) {
485 case 0: /* processor_entry */
486 case 1: /* bus_entry */
487 case 2: /* io_apic_entry */
488 case 3: /* int_entry */
489 case 4: /* int_entry */
490 break;
491 default:
492 kprintf("unknown base MP table entry type %d\n", type);
493 return EINVAL;
496 if (total_size < basetable_entry_types[type].length) {
497 kprintf("invalid base MP table length, "
498 "does not contain all entries\n");
499 return EINVAL;
501 total_size -= basetable_entry_types[type].length;
503 error = func(arg, position, type);
504 if (error)
505 return error;
507 position = (const uint8_t *)position +
508 basetable_entry_types[type].length;
510 return 0;
515 * Startup the SMP processors.
517 void
518 mp_start(void)
520 POSTCODE(MP_START_POST);
521 mp_enable(boot_address);
526 * Print various information about the SMP system hardware and setup.
528 void
529 mp_announce(void)
531 int x;
533 POSTCODE(MP_ANNOUNCE_POST);
535 kprintf("DragonFly/MP: Multiprocessor motherboard\n");
536 kprintf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
537 kprintf(", version: 0x%08x", cpu_apic_versions[0]);
538 kprintf(", at 0x%08x\n", cpu_apic_address);
539 for (x = 1; x <= mp_naps; ++x) {
540 kprintf(" cpu%d (AP): apic id: %2d", x, CPU_TO_ID(x));
541 kprintf(", version: 0x%08x", cpu_apic_versions[x]);
542 kprintf(", at 0x%08x\n", cpu_apic_address);
545 #if defined(APIC_IO)
546 for (x = 0; x < mp_napics; ++x) {
547 kprintf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
548 kprintf(", version: 0x%08x", io_apic_versions[x]);
549 kprintf(", at 0x%08x\n", io_apic_address[x]);
551 #else
552 kprintf(" Warning: APIC I/O disabled\n");
553 #endif /* APIC_IO */
557 * AP cpus call this to sync up protected mode.
559 * WARNING! We must ensure that the cpu is sufficiently initialized to
560 * be able to use the FP for our optimized bzero/bcopy code before
561 * we enter more mainstream C code.
563 * WARNING! %fs is not set up on entry. This routine sets up %fs.
565 void
566 init_secondary(void)
568 int gsel_tss;
569 int x, myid = bootAP;
570 u_int cr0;
571 struct mdglobaldata *md;
572 struct privatespace *ps;
574 ps = &CPU_prvspace[myid];
576 gdt_segs[GPRIV_SEL].ssd_base = (int)ps;
577 gdt_segs[GPROC0_SEL].ssd_base =
578 (int) &ps->mdglobaldata.gd_common_tss;
579 ps->mdglobaldata.mi.gd_prvspace = ps;
581 for (x = 0; x < NGDT; x++) {
582 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
585 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
586 r_gdt.rd_base = (int) &gdt[myid * NGDT];
587 lgdt(&r_gdt); /* does magic intra-segment return */
589 lidt(&r_idt);
591 lldt(_default_ldt);
592 mdcpu->gd_currentldt = _default_ldt;
594 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
595 gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
597 md = mdcpu; /* loaded through %fs:0 (mdglobaldata.mi.gd_prvspace)*/
599 md->gd_common_tss.tss_esp0 = 0; /* not used until after switch */
600 md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
601 md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;
602 md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
603 md->gd_common_tssd = *md->gd_tss_gdt;
604 ltr(gsel_tss);
607 * Set to a known state:
608 * Set by mpboot.s: CR0_PG, CR0_PE
609 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
611 cr0 = rcr0();
612 cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
613 load_cr0(cr0);
614 pmap_set_opt(); /* PSE/4MB pages, etc */
616 /* set up CPU registers and state */
617 cpu_setregs();
619 /* set up FPU state on the AP */
620 npxinit(__INITIAL_NPXCW__);
622 /* set up SSE registers */
623 enable_sse();
626 /*******************************************************************
627 * local functions and data
631 * start the SMP system
633 static void
634 mp_enable(u_int boot_addr)
636 int x;
637 #if defined(APIC_IO)
638 int apic;
639 u_int ux;
640 #endif /* APIC_IO */
641 vm_paddr_t mpfps_paddr;
643 POSTCODE(MP_ENABLE_POST);
645 if (madt_probe_test) {
646 mpfps_paddr = 0;
647 } else {
648 mpfps_paddr = mptable_probe();
649 if (mptable_check(mpfps_paddr))
650 mpfps_paddr = 0;
653 if (mpfps_paddr) {
654 struct mptable_pos mpt;
656 mptable_map(&mpt, mpfps_paddr);
658 mptable_lapic_enumerate(&mpt);
661 * We can safely map physical memory into SMPpt after
662 * mptable_pass1() completes.
664 mptable_pass1(&mpt);
667 * Examine the MP table for needed info
669 x = mptable_pass2(&mpt);
671 mptable_unmap(&mpt);
674 * Can't process default configs till the
675 * CPU APIC is pmapped
677 if (x)
678 mptable_default(x);
680 /* Post scan cleanup */
681 mptable_fix();
682 } else {
683 vm_paddr_t madt_paddr;
684 vm_offset_t lapic_addr;
685 int bsp_apic_id;
687 madt_paddr = madt_probe();
688 if (madt_paddr == 0)
689 panic("mp_enable: madt_probe failed\n");
691 lapic_addr = madt_pass1(madt_paddr);
692 if (lapic_addr == 0)
693 panic("mp_enable: no local apic (madt)!\n");
695 lapic_init(lapic_addr);
697 bsp_apic_id = APIC_ID(lapic.id);
698 if (madt_pass2(madt_paddr, bsp_apic_id))
699 panic("mp_enable: madt_pass2 failed\n");
702 #if defined(APIC_IO)
704 setup_apic_irq_mapping();
706 /* fill the LOGICAL io_apic_versions table */
707 for (apic = 0; apic < mp_napics; ++apic) {
708 ux = io_apic_read(apic, IOAPIC_VER);
709 io_apic_versions[apic] = ux;
710 io_apic_set_id(apic, IO_TO_ID(apic));
713 /* program each IO APIC in the system */
714 for (apic = 0; apic < mp_napics; ++apic)
715 if (io_apic_setup(apic) < 0)
716 panic("IO APIC setup failure");
718 #endif /* APIC_IO */
721 * These are required for SMP operation
724 /* install a 'Spurious INTerrupt' vector */
725 setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
726 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
728 /* install an inter-CPU IPI for TLB invalidation */
729 setidt(XINVLTLB_OFFSET, Xinvltlb,
730 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
732 /* install an inter-CPU IPI for IPIQ messaging */
733 setidt(XIPIQ_OFFSET, Xipiq,
734 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
736 /* install a timer vector */
737 setidt(XTIMER_OFFSET, Xtimer,
738 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
740 /* install an inter-CPU IPI for CPU stop/restart */
741 setidt(XCPUSTOP_OFFSET, Xcpustop,
742 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
744 /* start each Application Processor */
745 start_all_aps(boot_addr);
750 * look for the MP spec signature
753 /* string defined by the Intel MP Spec as identifying the MP table */
754 #define MP_SIG 0x5f504d5f /* _MP_ */
755 #define NEXT(X) ((X) += 4)
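/*
 * 0x5f504d5f is the ASCII signature "_MP_" read as a little-endian 32-bit
 * word; NEXT() steps the scan one 32-bit slot at a time, which is why
 * mptable_search_sig() takes its count in words rather than bytes.
 */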
756 static int
757 mptable_search_sig(u_int32_t target, int count)
759 vm_size_t map_size;
760 u_int32_t *addr;
761 int x, ret;
763 KKASSERT(target != 0);
765 map_size = count * sizeof(u_int32_t);
766 addr = pmap_mapdev((vm_paddr_t)target, map_size);
768 ret = 0;
769 for (x = 0; x < count; NEXT(x)) {
770 if (addr[x] == MP_SIG) {
771 /* make array index a byte index */
772 ret = target + (x * sizeof(u_int32_t));
773 break;
777 pmap_unmapdev((vm_offset_t)addr, map_size);
778 return ret;
782 typedef struct BUSDATA {
783 u_char bus_id;
784 enum busTypes bus_type;
785 } bus_datum;
787 typedef struct INTDATA {
788 u_char int_type;
789 u_short int_flags;
790 u_char src_bus_id;
791 u_char src_bus_irq;
792 u_char dst_apic_id;
793 u_char dst_apic_int;
794 u_char int_vector;
795 } io_int, local_int;
797 typedef struct BUSTYPENAME {
798 u_char type;
799 char name[7];
800 } bus_type_name;
802 static bus_type_name bus_type_table[] =
804 {CBUS, "CBUS"},
805 {CBUSII, "CBUSII"},
806 {EISA, "EISA"},
807 {MCA, "MCA"},
808 {UNKNOWN_BUSTYPE, "---"},
809 {ISA, "ISA"},
810 {MCA, "MCA"},
811 {UNKNOWN_BUSTYPE, "---"},
812 {UNKNOWN_BUSTYPE, "---"},
813 {UNKNOWN_BUSTYPE, "---"},
814 {UNKNOWN_BUSTYPE, "---"},
815 {UNKNOWN_BUSTYPE, "---"},
816 {PCI, "PCI"},
817 {UNKNOWN_BUSTYPE, "---"},
818 {UNKNOWN_BUSTYPE, "---"},
819 {UNKNOWN_BUSTYPE, "---"},
820 {UNKNOWN_BUSTYPE, "---"},
821 {XPRESS, "XPRESS"},
822 {UNKNOWN_BUSTYPE, "---"}
824 /* from MP spec v1.4, table 5-1 */
825 static int default_data[7][5] =
827 /* nbus, id0, type0, id1, type1 */
828 {1, 0, ISA, 255, 255},
829 {1, 0, EISA, 255, 255},
830 {1, 0, EISA, 255, 255},
831 {1, 0, MCA, 255, 255},
832 {2, 0, ISA, 1, PCI},
833 {2, 0, EISA, 1, PCI},
834 {2, 0, MCA, 1, PCI}
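/*
 * Example: default configuration type 5 (row index 4) describes two busses,
 * bus 0 = ISA and bus 1 = PCI; a 255 in the id/type columns means the second
 * bus is absent.
 */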
838 #ifdef APIC_IO
840 /* the bus data */
841 static bus_datum *bus_data;
843 /* the IO INT data, one entry per possible APIC INTerrupt */
844 static io_int *io_apic_ints;
845 static int nintrs;
847 #endif
849 static int processor_entry (const struct PROCENTRY *entry, int cpu);
850 #ifdef APIC_IO
851 static int bus_entry (bus_entry_ptr entry, int bus);
852 static int io_apic_entry (io_apic_entry_ptr entry, int apic);
853 static int int_entry (int_entry_ptr entry, int intr);
854 #endif
855 static int lookup_bus_type (char *name);
859 * 1st pass on motherboard's Intel MP specification table.
861 * determines:
862 * io_apic_address[N]
863 * mp_nbusses
864 * mp_napics
865 * nintrs
867 static void
868 mptable_pass1(struct mptable_pos *mpt)
870 #ifdef APIC_IO
871 int x;
872 #endif
873 mpfps_t fps;
874 mpcth_t cth;
875 int totalSize;
876 void* position;
877 int count;
878 int type;
880 POSTCODE(MPTABLE_PASS1_POST);
882 fps = mpt->mp_fps;
883 KKASSERT(fps != NULL);
885 #ifdef APIC_IO
886 /* clear various tables */
887 for (x = 0; x < NAPICID; ++x) {
888 io_apic_address[x] = ~0; /* IO APIC address table */
890 #endif
892 #ifdef APIC_IO
893 mp_nbusses = 0;
894 mp_napics = 0;
895 nintrs = 0;
896 #endif
898 /* check for use of 'default' configuration */
899 if (fps->mpfb1 != 0) {
900 #ifdef APIC_IO
901 io_apic_address[0] = DEFAULT_IO_APIC_BASE;
902 mp_nbusses = default_data[fps->mpfb1 - 1][0];
903 mp_napics = 1;
904 nintrs = 16;
905 #endif /* APIC_IO */
907 else {
908 cth = mpt->mp_cth;
909 KKASSERT(cth != NULL);
911 /* walk the table, recording info of interest */
912 totalSize = cth->base_table_length - sizeof(struct MPCTH);
913 position = (u_char *) cth + sizeof(struct MPCTH);
914 count = cth->entry_count;
916 while (count--) {
917 switch (type = *(u_char *) position) {
918 case 0: /* processor_entry */
919 break;
920 case 1: /* bus_entry */
921 #ifdef APIC_IO
922 ++mp_nbusses;
923 #endif
924 break;
925 case 2: /* io_apic_entry */
926 #ifdef APIC_IO
927 if (((io_apic_entry_ptr)position)->apic_flags
928 & IOAPICENTRY_FLAG_EN)
929 io_apic_address[mp_napics++] =
930 (vm_offset_t)((io_apic_entry_ptr)
931 position)->apic_address;
932 #endif
933 break;
934 case 3: /* int_entry */
935 #ifdef APIC_IO
936 ++nintrs;
937 #endif
938 break;
939 case 4: /* int_entry */
940 break;
941 default:
942 panic("mpfps Base Table HOSED!");
943 /* NOTREACHED */
946 totalSize -= basetable_entry_types[type].length;
947 position = (uint8_t *)position +
948 basetable_entry_types[type].length;
955 * 2nd pass on motherboard's Intel MP specification table.
957 * sets:
958 * ID_TO_IO(N), phy APIC ID to log CPU/IO table
959 * IO_TO_ID(N), logical IO to APIC ID table
960 * bus_data[N]
961 * io_apic_ints[N]
963 static int
964 mptable_pass2(struct mptable_pos *mpt)
966 int x;
967 mpfps_t fps;
968 mpcth_t cth;
969 int totalSize;
970 void* position;
971 int count;
972 int type;
973 int apic, bus, intr;
974 int i;
976 POSTCODE(MPTABLE_PASS2_POST);
978 fps = mpt->mp_fps;
979 KKASSERT(fps != NULL);
981 #ifdef APIC_IO
982 MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics,
983 M_DEVBUF, M_WAITOK);
984 MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics,
985 M_DEVBUF, M_WAITOK | M_ZERO);
986 MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + FIXUP_EXTRA_APIC_INTS),
987 M_DEVBUF, M_WAITOK);
988 MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses,
989 M_DEVBUF, M_WAITOK);
990 #endif
992 #ifdef APIC_IO
993 for (i = 0; i < mp_napics; i++) {
994 ioapic[i] = permanent_io_mapping(io_apic_address[i]);
996 #endif
998 /* clear various tables */
999 for (x = 0; x < NAPICID; ++x) {
1000 #ifdef APIC_IO
1001 ID_TO_IO(x) = -1; /* phy APIC ID to log CPU/IO table */
1002 IO_TO_ID(x) = -1; /* logical IO to APIC ID table */
1003 #endif
1006 #ifdef APIC_IO
1007 /* clear bus data table */
1008 for (x = 0; x < mp_nbusses; ++x)
1009 bus_data[x].bus_id = 0xff;
1011 /* clear IO APIC INT table */
1012 for (x = 0; x < (nintrs + 1); ++x) {
1013 io_apic_ints[x].int_type = 0xff;
1014 io_apic_ints[x].int_vector = 0xff;
1016 #endif
1018 /* record whether PIC or virtual-wire mode */
1019 machintr_setvar_simple(MACHINTR_VAR_IMCR_PRESENT, fps->mpfb2 & 0x80);
1021 /* check for use of 'default' configuration */
1022 if (fps->mpfb1 != 0)
1023 return fps->mpfb1; /* return default configuration type */
1025 cth = mpt->mp_cth;
1026 KKASSERT(cth != NULL);
1028 /* walk the table, recording info of interest */
1029 totalSize = cth->base_table_length - sizeof(struct MPCTH);
1030 position = (u_char *) cth + sizeof(struct MPCTH);
1031 count = cth->entry_count;
1032 apic = bus = intr = 0;
1034 while (count--) {
1035 switch (type = *(u_char *) position) {
1036 case 0:
1037 break;
1038 case 1:
1039 #ifdef APIC_IO
1040 if (bus_entry(position, bus))
1041 ++bus;
1042 #endif
1043 break;
1044 case 2:
1045 #ifdef APIC_IO
1046 if (io_apic_entry(position, apic))
1047 ++apic;
1048 #endif
1049 break;
1050 case 3:
1051 #ifdef APIC_IO
1052 if (int_entry(position, intr))
1053 ++intr;
1054 #endif
1055 break;
1056 case 4:
1057 /* int_entry(position); */
1058 break;
1059 default:
1060 panic("mpfps Base Table HOSED!");
1061 /* NOTREACHED */
1064 totalSize -= basetable_entry_types[type].length;
1065 position = (uint8_t *)position + basetable_entry_types[type].length;
1068 /* report the fact that it's NOT a default configuration */
1069 return 0;
1073 * Check if we should perform a hyperthreading "fix-up" to
1074 * enumerate any logical CPUs that aren't already listed
1075 * in the table.
1077 * XXX: We assume that all of the physical CPUs in the
1078 * system have the same number of logical CPUs.
1080 * XXX: We assume that APIC IDs are allocated such that
1081 * the APIC IDs for a physical processor are aligned
1082 * with the number of logical CPUs in the processor.
1084 static void
1085 mptable_hyperthread_fixup(u_int id_mask)
1087 int i, id, lcpus_max;
1089 if ((cpu_feature & CPUID_HTT) == 0)
1090 return;
1092 lcpus_max = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
1093 if (lcpus_max <= 1)
1094 return;
1096 if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
1098 * INSTRUCTION SET REFERENCE, A-M (#253666)
1099 * Page 3-181, Table 3-20
1100 * "The nearest power-of-2 integer that is not smaller
1101 * than EBX[23:16] is the number of unique initial APIC
1102 * IDs reserved for addressing different logical
1103 * processors in a physical package."
1105 for (i = 0; ; ++i) {
1106 if ((1 << i) >= lcpus_max) {
1107 lcpus_max = 1 << i;
1108 break;
1113 if (mp_naps == lcpus_max) {
1114 /* We have nothing to fix */
1115 return;
1116 } else if (mp_naps == 1) {
1117 /* XXX this may be incorrect */
1118 logical_cpus = lcpus_max;
1119 } else {
1120 int cur, prev, dist;
1123 * Calculate the distances between two nearest
1124 * APIC IDs. If all such distances are the same,
1125 * then that distance is the number of missing
1126 * cpus that we are going to fill in later.
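 * Example: if the table enables APIC IDs 0 and 2 only, every gap is 2,
 * so logical_cpus becomes 2 and the missing sibling IDs 1 and 3 are
 * filled in later.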
1128 dist = cur = prev = -1;
1129 for (id = 0; id < MAXCPU; ++id) {
1130 if ((id_mask & 1 << id) == 0)
1131 continue;
1133 cur = id;
1134 if (prev >= 0) {
1135 int new_dist = cur - prev;
1137 if (dist < 0)
1138 dist = new_dist;
1141 * Make sure that all distances
1142 * between two nearest APIC IDs
1143 * are the same.
1145 if (dist != new_dist)
1146 return;
1148 prev = cur;
1150 if (dist == 1)
1151 return;
1153 /* Must be power of 2 */
1154 if (dist & (dist - 1))
1155 return;
1157 /* Can't exceed CPU package capacity */
1158 if (dist > lcpus_max)
1159 logical_cpus = lcpus_max;
1160 else
1161 logical_cpus = dist;
1165 * For each APIC ID of a CPU that is set in the mask,
1166 * scan the other candidate APIC ID's for this
1167 * physical processor. If any of those ID's are
1168 * already in the table, then kill the fixup.
1170 for (id = 0; id < MAXCPU; id++) {
1171 if ((id_mask & 1 << id) == 0)
1172 continue;
1173 /* First, make sure we are on a logical_cpus boundary. */
1174 if (id % logical_cpus != 0)
1175 return;
1176 for (i = id + 1; i < id + logical_cpus; i++)
1177 if ((id_mask & 1 << i) != 0)
1178 return;
1182 * OK, the IDs checked out, so enable the fixup. We have to fix up
1183 * mp_naps right now.
1185 need_hyperthreading_fixup = 1;
1186 mp_naps *= logical_cpus;
1189 static int
1190 mptable_map(struct mptable_pos *mpt, vm_paddr_t mpfps_paddr)
1192 mpfps_t fps = NULL;
1193 mpcth_t cth = NULL;
1194 vm_size_t cth_mapsz = 0;
1196 bzero(mpt, sizeof(*mpt));
1198 fps = pmap_mapdev(mpfps_paddr, sizeof(*fps));
1199 if (fps->pap != 0) {
1201 * Map configuration table header to get
1202 * the base table size
1204 cth = pmap_mapdev(fps->pap, sizeof(*cth));
1205 cth_mapsz = cth->base_table_length;
1206 pmap_unmapdev((vm_offset_t)cth, sizeof(*cth));
1208 if (cth_mapsz < sizeof(*cth)) {
1209 kprintf("invalid base MP table length %d\n",
1210 (int)cth_mapsz);
1211 pmap_unmapdev((vm_offset_t)fps, sizeof(*fps));
1212 return EINVAL;
1216 * Map the base table
1218 cth = pmap_mapdev(fps->pap, cth_mapsz);
1221 mpt->mp_fps = fps;
1222 mpt->mp_cth = cth;
1223 mpt->mp_cth_mapsz = cth_mapsz;
1225 return 0;
1228 static void
1229 mptable_unmap(struct mptable_pos *mpt)
1231 if (mpt->mp_cth != NULL) {
1232 pmap_unmapdev((vm_offset_t)mpt->mp_cth, mpt->mp_cth_mapsz);
1233 mpt->mp_cth = NULL;
1234 mpt->mp_cth_mapsz = 0;
1236 if (mpt->mp_fps != NULL) {
1237 pmap_unmapdev((vm_offset_t)mpt->mp_fps, sizeof(*mpt->mp_fps));
1238 mpt->mp_fps = NULL;
1242 #ifdef APIC_IO
1244 void
1245 assign_apic_irq(int apic, int intpin, int irq)
1247 int x;
1249 if (int_to_apicintpin[irq].ioapic != -1)
1250 panic("assign_apic_irq: inconsistent table");
1252 int_to_apicintpin[irq].ioapic = apic;
1253 int_to_apicintpin[irq].int_pin = intpin;
1254 int_to_apicintpin[irq].apic_address = ioapic[apic];
1255 int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;
1257 for (x = 0; x < nintrs; x++) {
1258 if ((io_apic_ints[x].int_type == 0 ||
1259 io_apic_ints[x].int_type == 3) &&
1260 io_apic_ints[x].int_vector == 0xff &&
1261 io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
1262 io_apic_ints[x].dst_apic_int == intpin)
1263 io_apic_ints[x].int_vector = irq;
1267 void
1268 revoke_apic_irq(int irq)
1270 int x;
1271 int oldapic;
1272 int oldintpin;
1274 if (int_to_apicintpin[irq].ioapic == -1)
1275 panic("revoke_apic_irq: inconsistent table");
1277 oldapic = int_to_apicintpin[irq].ioapic;
1278 oldintpin = int_to_apicintpin[irq].int_pin;
1280 int_to_apicintpin[irq].ioapic = -1;
1281 int_to_apicintpin[irq].int_pin = 0;
1282 int_to_apicintpin[irq].apic_address = NULL;
1283 int_to_apicintpin[irq].redirindex = 0;
1285 for (x = 0; x < nintrs; x++) {
1286 if ((io_apic_ints[x].int_type == 0 ||
1287 io_apic_ints[x].int_type == 3) &&
1288 io_apic_ints[x].int_vector != 0xff &&
1289 io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
1290 io_apic_ints[x].dst_apic_int == oldintpin)
1291 io_apic_ints[x].int_vector = 0xff;
1296 * Allocate an IRQ
1298 static void
1299 allocate_apic_irq(int intr)
1301 int apic;
1302 int intpin;
1303 int irq;
1305 if (io_apic_ints[intr].int_vector != 0xff)
1306 return; /* Interrupt handler already assigned */
1308 if (io_apic_ints[intr].int_type != 0 &&
1309 (io_apic_ints[intr].int_type != 3 ||
1310 (io_apic_ints[intr].dst_apic_id == IO_TO_ID(0) &&
1311 io_apic_ints[intr].dst_apic_int == 0)))
1312 return; /* Not INT or ExtInt on != (0, 0) */
1314 irq = 0;
1315 while (irq < APIC_INTMAPSIZE &&
1316 int_to_apicintpin[irq].ioapic != -1)
1317 irq++;
1319 if (irq >= APIC_INTMAPSIZE)
1320 return; /* No free interrupt handlers */
1322 apic = ID_TO_IO(io_apic_ints[intr].dst_apic_id);
1323 intpin = io_apic_ints[intr].dst_apic_int;
1325 assign_apic_irq(apic, intpin, irq);
1326 io_apic_setup_intpin(apic, intpin);
1330 static void
1331 swap_apic_id(int apic, int oldid, int newid)
1333 int x;
1334 int oapic;
1337 if (oldid == newid)
1338 return; /* Nothing to do */
1340 kprintf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n",
1341 apic, oldid, newid);
1343 /* Swap physical APIC IDs in interrupt entries */
1344 for (x = 0; x < nintrs; x++) {
1345 if (io_apic_ints[x].dst_apic_id == oldid)
1346 io_apic_ints[x].dst_apic_id = newid;
1347 else if (io_apic_ints[x].dst_apic_id == newid)
1348 io_apic_ints[x].dst_apic_id = oldid;
1351 /* Swap physical APIC IDs in IO_TO_ID mappings */
1352 for (oapic = 0; oapic < mp_napics; oapic++)
1353 if (IO_TO_ID(oapic) == newid)
1354 break;
1356 if (oapic < mp_napics) {
1357 kprintf("Changing APIC ID for IO APIC #%d from "
1358 "%d to %d in MP table\n",
1359 oapic, newid, oldid);
1360 IO_TO_ID(oapic) = oldid;
1362 IO_TO_ID(apic) = newid;
1366 static void
1367 fix_id_to_io_mapping(void)
1369 int x;
1371 for (x = 0; x < NAPICID; x++)
1372 ID_TO_IO(x) = -1;
1374 for (x = 0; x <= mp_naps; x++)
1375 if (CPU_TO_ID(x) < NAPICID)
1376 ID_TO_IO(CPU_TO_ID(x)) = x;
1378 for (x = 0; x < mp_napics; x++)
1379 if (IO_TO_ID(x) < NAPICID)
1380 ID_TO_IO(IO_TO_ID(x)) = x;
1384 static int
1385 first_free_apic_id(void)
1387 int freeid, x;
1389 for (freeid = 0; freeid < NAPICID; freeid++) {
1390 for (x = 0; x <= mp_naps; x++)
1391 if (CPU_TO_ID(x) == freeid)
1392 break;
1393 if (x <= mp_naps)
1394 continue;
1395 for (x = 0; x < mp_napics; x++)
1396 if (IO_TO_ID(x) == freeid)
1397 break;
1398 if (x < mp_napics)
1399 continue;
1400 return freeid;
1402 return freeid;
1406 static int
1407 io_apic_id_acceptable(int apic, int id)
1409 int cpu; /* Logical CPU number */
1410 int oapic; /* Logical IO APIC number for other IO APIC */
1412 if (id >= NAPICID)
1413 return 0; /* Out of range */
1415 for (cpu = 0; cpu <= mp_naps; cpu++)
1416 if (CPU_TO_ID(cpu) == id)
1417 return 0; /* Conflict with CPU */
1419 for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++)
1420 if (IO_TO_ID(oapic) == id)
1421 return 0; /* Conflict with other APIC */
1423 return 1; /* ID is acceptable for IO APIC */
1426 static
1427 io_int *
1428 io_apic_find_int_entry(int apic, int pin)
1430 int x;
1432 /* search each of the possible INTerrupt sources */
1433 for (x = 0; x < nintrs; ++x) {
1434 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1435 (pin == io_apic_ints[x].dst_apic_int))
1436 return (&io_apic_ints[x]);
1438 return NULL;
1441 #endif
1444 * parse an Intel MP specification table
1446 static void
1447 mptable_fix(void)
1449 #ifdef APIC_IO
1450 int x;
1451 int id;
1452 int apic; /* IO APIC unit number */
1453 int freeid; /* Free physical APIC ID */
1454 int physid; /* Current physical IO APIC ID */
1455 io_int *io14;
1456 int bus_0 = 0; /* Stop GCC warning */
1457 int bus_pci = 0; /* Stop GCC warning */
1458 int num_pci_bus;
1461 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
1462 * did it wrong. The MP spec says that when more than 1 PCI bus
1463 * exists the BIOS must begin with bus entries for the PCI bus and use
1464 * actual PCI bus numbering. This implies that when only 1 PCI bus
1465 * exists the BIOS can choose to ignore this ordering, and indeed many
1466 * MP motherboards do ignore it. This causes a problem when the PCI
1467 * sub-system makes requests of the MP sub-system based on PCI bus
1468 * numbers. So here we look for the situation and renumber the
1469 * busses and associated INTs in an effort to "make it right".
1472 /* find bus 0, PCI bus, count the number of PCI busses */
1473 for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
1474 if (bus_data[x].bus_id == 0) {
1475 bus_0 = x;
1477 if (bus_data[x].bus_type == PCI) {
1478 ++num_pci_bus;
1479 bus_pci = x;
1483 * bus_0 == slot of bus with ID of 0
1484 * bus_pci == slot of last PCI bus encountered
1487 /* check the 1 PCI bus case for sanity */
1488 /* if it is number 0 all is well */
1489 if (num_pci_bus == 1 &&
1490 bus_data[bus_pci].bus_id != 0) {
1492 /* mis-numbered, swap with whichever bus uses slot 0 */
1494 /* swap the bus entry types */
1495 bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
1496 bus_data[bus_0].bus_type = PCI;
1498 /* swap each relevant INTerrupt entry */
1499 id = bus_data[bus_pci].bus_id;
1500 for (x = 0; x < nintrs; ++x) {
1501 if (io_apic_ints[x].src_bus_id == id) {
1502 io_apic_ints[x].src_bus_id = 0;
1504 else if (io_apic_ints[x].src_bus_id == 0) {
1505 io_apic_ints[x].src_bus_id = id;
1510 /* Assign IO APIC IDs.
1512 * First try the existing ID. If a conflict is detected, try
1513 * the ID in the MP table. If a conflict is still detected, find
1514 * a free id.
1516 * We cannot use the ID_TO_IO table before all conflicts have been
1517 * resolved and the table has been corrected.
1519 for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */
1521 /* First try to use the value set by the BIOS */
1522 physid = io_apic_get_id(apic);
1523 if (io_apic_id_acceptable(apic, physid)) {
1524 if (IO_TO_ID(apic) != physid)
1525 swap_apic_id(apic, IO_TO_ID(apic), physid);
1526 continue;
1529 /* Then check if the value in the MP table is acceptable */
1530 if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
1531 continue;
1533 /* Last resort, find a free APIC ID and use it */
1534 freeid = first_free_apic_id();
1535 if (freeid >= NAPICID)
1536 panic("No free physical APIC IDs found");
1538 if (io_apic_id_acceptable(apic, freeid)) {
1539 swap_apic_id(apic, IO_TO_ID(apic), freeid);
1540 continue;
1542 panic("Free physical APIC ID not usable");
1544 fix_id_to_io_mapping();
1546 /* detect and fix broken Compaq MP table */
1547 if (apic_int_type(0, 0) == -1) {
1548 kprintf("APIC_IO: MP table broken: 8259->APIC entry missing!\n");
1549 io_apic_ints[nintrs].int_type = 3; /* ExtInt */
1550 io_apic_ints[nintrs].int_vector = 0xff; /* Unassigned */
1551 /* XXX fixme, set src bus id etc, but it doesn't seem to hurt */
1552 io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0);
1553 io_apic_ints[nintrs].dst_apic_int = 0; /* Pin 0 */
1554 nintrs++;
1555 } else if (apic_int_type(0, 0) == 0) {
1556 kprintf("APIC_IO: MP table broken: ExtINT entry corrupt!\n");
1557 for (x = 0; x < nintrs; ++x)
1558 if ((0 == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1559 (0 == io_apic_ints[x].dst_apic_int)) {
1560 io_apic_ints[x].int_type = 3;
1561 io_apic_ints[x].int_vector = 0xff;
1562 break;
1567 * Fix missing IRQ 15 when IRQ 14 is an ISA interrupt. IDE
1568 * controllers universally come in pairs. If IRQ 14 is specified
1569 * as an ISA interrupt, then IRQ 15 had better be too.
1571 * [ Shuttle XPC / AMD Athlon X2 ]
1572 * The MPTable is missing an entry for IRQ 15. Note that the
1573 * ACPI table has an entry for both 14 and 15.
1575 if (apic_int_type(0, 14) == 0 && apic_int_type(0, 15) == -1) {
1576 kprintf("APIC_IO: MP table broken: IRQ 15 not ISA when IRQ 14 is!\n");
1577 io14 = io_apic_find_int_entry(0, 14);
1578 io_apic_ints[nintrs] = *io14;
1579 io_apic_ints[nintrs].src_bus_irq = 15;
1580 io_apic_ints[nintrs].dst_apic_int = 15;
1581 nintrs++;
1583 #endif
1586 #ifdef APIC_IO
1588 /* Assign low level interrupt handlers */
1589 static void
1590 setup_apic_irq_mapping(void)
1592 int x;
1593 int int_vector;
1595 /* Clear array */
1596 for (x = 0; x < APIC_INTMAPSIZE; x++) {
1597 int_to_apicintpin[x].ioapic = -1;
1598 int_to_apicintpin[x].int_pin = 0;
1599 int_to_apicintpin[x].apic_address = NULL;
1600 int_to_apicintpin[x].redirindex = 0;
1603 /* First assign ISA/EISA interrupts */
1604 for (x = 0; x < nintrs; x++) {
1605 int_vector = io_apic_ints[x].src_bus_irq;
1606 if (int_vector < APIC_INTMAPSIZE &&
1607 io_apic_ints[x].int_vector == 0xff &&
1608 int_to_apicintpin[int_vector].ioapic == -1 &&
1609 (apic_int_is_bus_type(x, ISA) ||
1610 apic_int_is_bus_type(x, EISA)) &&
1611 io_apic_ints[x].int_type == 0) {
1612 assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1613 io_apic_ints[x].dst_apic_int,
1614 int_vector);
1618 /* Assign ExtInt entry if no ISA/EISA interrupt 0 entry */
1619 for (x = 0; x < nintrs; x++) {
1620 if (io_apic_ints[x].dst_apic_int == 0 &&
1621 io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
1622 io_apic_ints[x].int_vector == 0xff &&
1623 int_to_apicintpin[0].ioapic == -1 &&
1624 io_apic_ints[x].int_type == 3) {
1625 assign_apic_irq(0, 0, 0);
1626 break;
1629 /* PCI interrupt assignment is deferred */
1632 #endif
1634 void
1635 mp_set_cpuids(int cpu_id, int apic_id)
1637 CPU_TO_ID(cpu_id) = apic_id;
1638 ID_TO_CPU(apic_id) = cpu_id;
1641 static int
1642 processor_entry(const struct PROCENTRY *entry, int cpu)
1644 KKASSERT(cpu > 0);
1646 /* check for usability */
1647 if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
1648 return 0;
1650 if (entry->apic_id >= NAPICID)
1651 panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
1652 /* check for BSP flag */
1653 if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
1654 mp_set_cpuids(0, entry->apic_id);
1655 return 0; /* it's already been counted */
1658 /* add another AP to list, if less than max number of CPUs */
1659 else if (cpu < MAXCPU) {
1660 mp_set_cpuids(cpu, entry->apic_id);
1661 return 1;
1664 return 0;
1667 #ifdef APIC_IO
1669 static int
1670 bus_entry(bus_entry_ptr entry, int bus)
1672 int x;
1673 char c, name[8];
1675 /* encode the name into an index */
1676 for (x = 0; x < 6; ++x) {
1677 if ((c = entry->bus_type[x]) == ' ')
1678 break;
1679 name[x] = c;
1681 name[x] = '\0';
1683 if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
1684 panic("unknown bus type: '%s'", name);
1686 bus_data[bus].bus_id = entry->bus_id;
1687 bus_data[bus].bus_type = x;
1689 return 1;
1692 static int
1693 io_apic_entry(io_apic_entry_ptr entry, int apic)
1695 if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
1696 return 0;
1698 IO_TO_ID(apic) = entry->apic_id;
1699 if (entry->apic_id < NAPICID)
1700 ID_TO_IO(entry->apic_id) = apic;
1702 return 1;
1705 #endif
1707 static int
1708 lookup_bus_type(char *name)
1710 int x;
1712 for (x = 0; x < MAX_BUSTYPE; ++x)
1713 if (strcmp(bus_type_table[x].name, name) == 0)
1714 return bus_type_table[x].type;
1716 return UNKNOWN_BUSTYPE;
1719 #ifdef APIC_IO
1721 static int
1722 int_entry(int_entry_ptr entry, int intr)
1724 int apic;
1726 io_apic_ints[intr].int_type = entry->int_type;
1727 io_apic_ints[intr].int_flags = entry->int_flags;
1728 io_apic_ints[intr].src_bus_id = entry->src_bus_id;
1729 io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
1730 if (entry->dst_apic_id == 255) {
1731 /* This signal goes to all IO APICs. Select an IO APIC
1732 with a sufficient number of interrupt pins */
1733 for (apic = 0; apic < mp_napics; apic++)
1734 if (((io_apic_read(apic, IOAPIC_VER) &
1735 IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
1736 entry->dst_apic_int)
1737 break;
1738 if (apic < mp_napics)
1739 io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
1740 else
1741 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1742 } else
1743 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1744 io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;
1746 return 1;
1749 static int
1750 apic_int_is_bus_type(int intr, int bus_type)
1752 int bus;
1754 for (bus = 0; bus < mp_nbusses; ++bus)
1755 if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
1756 && ((int) bus_data[bus].bus_type == bus_type))
1757 return 1;
1759 return 0;
1763 * Given a traditional ISA INT mask, return an APIC mask.
1765 u_int
1766 isa_apic_mask(u_int isa_mask)
1768 int isa_irq;
1769 int apic_pin;
1771 #if defined(SKIP_IRQ15_REDIRECT)
1772 if (isa_mask == (1 << 15)) {
1773 kprintf("skipping ISA IRQ15 redirect\n");
1774 return isa_mask;
1776 #endif /* SKIP_IRQ15_REDIRECT */
1778 isa_irq = ffs(isa_mask); /* find its bit position */
1779 if (isa_irq == 0) /* doesn't exist */
1780 return 0;
1781 --isa_irq; /* make it zero based */
1783 apic_pin = isa_apic_irq(isa_irq); /* look for APIC connection */
1784 if (apic_pin == -1)
1785 return 0;
1787 return (1 << apic_pin); /* convert pin# to a mask */
1791 * Determine which APIC pin an ISA/EISA INT is attached to.
1793 #define INTTYPE(I) (io_apic_ints[(I)].int_type)
1794 #define INTPIN(I) (io_apic_ints[(I)].dst_apic_int)
1795 #define INTIRQ(I) (io_apic_ints[(I)].int_vector)
1796 #define INTAPIC(I) (ID_TO_IO(io_apic_ints[(I)].dst_apic_id))
1798 #define SRCBUSIRQ(I) (io_apic_ints[(I)].src_bus_irq)
1800 isa_apic_irq(int isa_irq)
1802 int intr;
1804 for (intr = 0; intr < nintrs; ++intr) { /* check each record */
1805 if (INTTYPE(intr) == 0) { /* standard INT */
1806 if (SRCBUSIRQ(intr) == isa_irq) {
1807 if (apic_int_is_bus_type(intr, ISA) ||
1808 apic_int_is_bus_type(intr, EISA)) {
1809 if (INTIRQ(intr) == 0xff)
1810 return -1; /* unassigned */
1811 return INTIRQ(intr); /* found */
1816 return -1; /* NOT found */
1821 * Determine which APIC pin a PCI INT is attached to.
1823 #define SRCBUSID(I) (io_apic_ints[(I)].src_bus_id)
1824 #define SRCBUSDEVICE(I) ((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
1825 #define SRCBUSLINE(I) (io_apic_ints[(I)].src_bus_irq & 0x03)
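/*
 * For PCI interrupt entries the MP table packs the source as
 * src_bus_irq = (device << 2) | INT#, so e.g. device 5 INTB (pciInt 2,
 * zero-based 1) is encoded as (5 << 2) | 1 = 0x15, which is what the
 * SRCBUSDEVICE()/SRCBUSLINE() macros above unpack.
 */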
1827 pci_apic_irq(int pciBus, int pciDevice, int pciInt)
1829 int intr;
1831 --pciInt; /* zero based */
1833 for (intr = 0; intr < nintrs; ++intr) { /* check each record */
1834 if ((INTTYPE(intr) == 0) /* standard INT */
1835 && (SRCBUSID(intr) == pciBus)
1836 && (SRCBUSDEVICE(intr) == pciDevice)
1837 && (SRCBUSLINE(intr) == pciInt)) { /* a candidate IRQ */
1838 if (apic_int_is_bus_type(intr, PCI)) {
1839 if (INTIRQ(intr) == 0xff)
1840 allocate_apic_irq(intr);
1841 if (INTIRQ(intr) == 0xff)
1842 return -1; /* unassigned */
1843 return INTIRQ(intr); /* exact match */
1848 return -1; /* NOT found */
1852 next_apic_irq(int irq)
1854 int intr, ointr;
1855 int bus, bustype;
1857 bus = 0;
1858 bustype = 0;
1859 for (intr = 0; intr < nintrs; intr++) {
1860 if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
1861 continue;
1862 bus = SRCBUSID(intr);
1863 bustype = apic_bus_type(bus);
1864 if (bustype != ISA &&
1865 bustype != EISA &&
1866 bustype != PCI)
1867 continue;
1868 break;
1870 if (intr >= nintrs) {
1871 return -1;
1873 for (ointr = intr + 1; ointr < nintrs; ointr++) {
1874 if (INTTYPE(ointr) != 0)
1875 continue;
1876 if (bus != SRCBUSID(ointr))
1877 continue;
1878 if (bustype == PCI) {
1879 if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
1880 continue;
1881 if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
1882 continue;
1884 if (bustype == ISA || bustype == EISA) {
1885 if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
1886 continue;
1888 if (INTPIN(intr) == INTPIN(ointr))
1889 continue;
1890 break;
1892 if (ointr >= nintrs) {
1893 return -1;
1895 return INTIRQ(ointr);
1897 #undef SRCBUSLINE
1898 #undef SRCBUSDEVICE
1899 #undef SRCBUSID
1900 #undef SRCBUSIRQ
1902 #undef INTPIN
1903 #undef INTIRQ
1904 #undef INTAPIC
1905 #undef INTTYPE
1907 #endif
1910 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
1912 * XXX FIXME:
1913 * Exactly what this means is unclear at this point. It is a solution
1914 * for motherboards that redirect the MBIRQ0 pin. Generically a motherboard
1915 * could route any of the ISA INTs to upper (>15) IRQ values. But most would
1916 * NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
1917 * option.
1920 undirect_isa_irq(int rirq)
1922 #if defined(READY)
1923 if (bootverbose)
1924 kprintf("Freeing redirected ISA irq %d.\n", rirq);
1925 /** FIXME: tickle the MB redirector chip */
1926 return /* XXX */;
1927 #else
1928 if (bootverbose)
1929 kprintf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
1930 return 0;
1931 #endif /* READY */
1936 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
1939 undirect_pci_irq(int rirq)
1941 #if defined(READY)
1942 if (bootverbose)
1943 kprintf("Freeing redirected PCI irq %d.\n", rirq);
1945 /** FIXME: tickle the MB redirector chip */
1946 return /* XXX */;
1947 #else
1948 if (bootverbose)
1949 kprintf("Freeing (NOT implemented) redirected PCI irq %d.\n",
1950 rirq);
1951 return 0;
1952 #endif /* READY */
1956 #ifdef APIC_IO
1959 * given a bus ID, return:
1960 * the bus type if found
1961 * -1 if NOT found
1964 apic_bus_type(int id)
1966 int x;
1968 for (x = 0; x < mp_nbusses; ++x)
1969 if (bus_data[x].bus_id == id)
1970 return bus_data[x].bus_type;
1972 return -1;
1976 * given a LOGICAL APIC# and pin#, return:
1977 * the associated src bus ID if found
1978 * -1 if NOT found
1981 apic_src_bus_id(int apic, int pin)
1983 int x;
1985 /* search each of the possible INTerrupt sources */
1986 for (x = 0; x < nintrs; ++x)
1987 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1988 (pin == io_apic_ints[x].dst_apic_int))
1989 return (io_apic_ints[x].src_bus_id);
1991 return -1; /* NOT found */
1995 * given a LOGICAL APIC# and pin#, return:
1996 * the associated src bus IRQ if found
1997 * -1 if NOT found
2000 apic_src_bus_irq(int apic, int pin)
2002 int x;
2004 for (x = 0; x < nintrs; x++)
2005 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
2006 (pin == io_apic_ints[x].dst_apic_int))
2007 return (io_apic_ints[x].src_bus_irq);
2009 return -1; /* NOT found */
2014 * given a LOGICAL APIC# and pin#, return:
2015 * the associated INTerrupt type if found
2016 * -1 if NOT found
2019 apic_int_type(int apic, int pin)
2021 int x;
2023 /* search each of the possible INTerrupt sources */
2024 for (x = 0; x < nintrs; ++x) {
2025 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
2026 (pin == io_apic_ints[x].dst_apic_int))
2027 return (io_apic_ints[x].int_type);
2029 return -1; /* NOT found */
2033 * Return the IRQ associated with an APIC pin
2035 int
2036 apic_irq(int apic, int pin)
2038 int x;
2039 int res;
2041 for (x = 0; x < nintrs; ++x) {
2042 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
2043 (pin == io_apic_ints[x].dst_apic_int)) {
2044 res = io_apic_ints[x].int_vector;
2045 if (res == 0xff)
2046 return -1;
2047 if (apic != int_to_apicintpin[res].ioapic)
2048 panic("apic_irq: inconsistent table %d/%d", apic, int_to_apicintpin[res].ioapic);
2049 if (pin != int_to_apicintpin[res].int_pin)
2050 panic("apic_irq inconsistent table (2)");
2051 return res;
2054 return -1;
2059 * given a LOGICAL APIC# and pin#, return:
2060 * the associated trigger mode if found
2061 * -1 if NOT found
2064 apic_trigger(int apic, int pin)
2066 int x;
2068 /* search each of the possible INTerrupt sources */
2069 for (x = 0; x < nintrs; ++x)
2070 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
2071 (pin == io_apic_ints[x].dst_apic_int))
2072 return ((io_apic_ints[x].int_flags >> 2) & 0x03);
2074 return -1; /* NOT found */
2079 * given a LOGICAL APIC# and pin#, return:
2080 * the associated 'active' level if found
2081 * -1 if NOT found
2084 apic_polarity(int apic, int pin)
2086 int x;
2088 /* search each of the possible INTerrupt sources */
2089 for (x = 0; x < nintrs; ++x)
2090 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
2091 (pin == io_apic_ints[x].dst_apic_int))
2092 return (io_apic_ints[x].int_flags & 0x03);
2094 return -1; /* NOT found */
2097 #endif
2100 * set data according to MP defaults
2101 * FIXME: probably not complete yet...
2103 static void
2104 mptable_default(int type)
2106 #if defined(APIC_IO)
2107 int io_apic_id;
2108 int pin;
2109 #endif /* APIC_IO */
2111 #if 0
2112 kprintf(" MP default config type: %d\n", type);
2113 switch (type) {
2114 case 1:
2115 kprintf(" bus: ISA, APIC: 82489DX\n");
2116 break;
2117 case 2:
2118 kprintf(" bus: EISA, APIC: 82489DX\n");
2119 break;
2120 case 3:
2121 kprintf(" bus: EISA, APIC: 82489DX\n");
2122 break;
2123 case 4:
2124 kprintf(" bus: MCA, APIC: 82489DX\n");
2125 break;
2126 case 5:
2127 kprintf(" bus: ISA+PCI, APIC: Integrated\n");
2128 break;
2129 case 6:
2130 kprintf(" bus: EISA+PCI, APIC: Integrated\n");
2131 break;
2132 case 7:
2133 kprintf(" bus: MCA+PCI, APIC: Integrated\n");
2134 break;
2135 default:
2136 kprintf(" future type\n");
2137 break;
2138 /* NOTREACHED */
2140 #endif /* 0 */
2142 #if defined(APIC_IO)
2143 /* one and only IO APIC */
2144 io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
2147 * sanity check, refer to MP spec section 3.6.6, last paragraph
2148 * necessary as some hardware isn't properly setting up the IO APIC
2150 #if defined(REALLY_ANAL_IOAPICID_VALUE)
2151 if (io_apic_id != 2) {
2152 #else
2153 if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
2154 #endif /* REALLY_ANAL_IOAPICID_VALUE */
2155 io_apic_set_id(0, 2);
2156 io_apic_id = 2;
2158 IO_TO_ID(0) = io_apic_id;
2159 ID_TO_IO(io_apic_id) = 0;
2160 #endif /* APIC_IO */
2162 /* fill out bus entries */
2163 switch (type) {
2164 case 1:
2165 case 2:
2166 case 3:
2167 case 4:
2168 case 5:
2169 case 6:
2170 case 7:
2171 #ifdef APIC_IO
2172 bus_data[0].bus_id = default_data[type - 1][1];
2173 bus_data[0].bus_type = default_data[type - 1][2];
2174 bus_data[1].bus_id = default_data[type - 1][3];
2175 bus_data[1].bus_type = default_data[type - 1][4];
2176 #endif
2177 break;
2179 /* case 4: case 7: MCA NOT supported */
2180 default: /* illegal/reserved */
2181 panic("BAD default MP config: %d", type);
2182 /* NOTREACHED */
2185 #if defined(APIC_IO)
2186 /* general cases from MP v1.4, table 5-2 */
2187 for (pin = 0; pin < 16; ++pin) {
2188 io_apic_ints[pin].int_type = 0;
2189 io_apic_ints[pin].int_flags = 0x05; /* edge/active-hi */
2190 io_apic_ints[pin].src_bus_id = 0;
2191 io_apic_ints[pin].src_bus_irq = pin; /* IRQ2 caught below */
2192 io_apic_ints[pin].dst_apic_id = io_apic_id;
2193 io_apic_ints[pin].dst_apic_int = pin; /* 1-to-1 */
2196 /* special cases from MP v1.4, table 5-2 */
2197 if (type == 2) {
2198 io_apic_ints[2].int_type = 0xff; /* N/C */
2199 io_apic_ints[13].int_type = 0xff; /* N/C */
2200 #if !defined(APIC_MIXED_MODE)
2201 /** FIXME: ??? */
2202 panic("sorry, can't support type 2 default yet");
2203 #endif /* APIC_MIXED_MODE */
2205 else
2206 io_apic_ints[2].src_bus_irq = 0; /* ISA IRQ0 is on APIC INT 2 */
2208 if (type == 7)
2209 io_apic_ints[0].int_type = 0xff; /* N/C */
2210 else
2211 io_apic_ints[0].int_type = 3; /* vectored 8259 */
2212 #endif /* APIC_IO */
2216 * Map a physical memory address representing I/O into KVA. The I/O
2217 * block is assumed not to cross a page boundary.
2219 void *
2220 permanent_io_mapping(vm_paddr_t pa)
2222 vm_offset_t vaddr;
2223 int pgeflag;
2224 int i;
2226 KKASSERT(pa < 0x100000000LL);
2228 pgeflag = 0; /* not used for SMP yet */
2231 * If the requested physical address has already been incidentally
2232 * mapped, just use the existing mapping. Otherwise create a new
2233 * mapping.
2235 for (i = IO_MAPPING_START_INDEX; i < SMPpt_alloc_index; ++i) {
2236 if (((vm_offset_t)SMPpt[i] & PG_FRAME) ==
2237 ((vm_offset_t)pa & PG_FRAME)) {
2238 break;
2241 if (i == SMPpt_alloc_index) {
2242 if (i == NPTEPG - 2) {
2243 panic("permanent_io_mapping: We ran out of space"
2244 " in SMPpt[]!");
2246 SMPpt[i] = (pt_entry_t)(PG_V | PG_RW | pgeflag |
2247 ((vm_offset_t)pa & PG_FRAME));
2248 ++SMPpt_alloc_index;
2250 vaddr = (vm_offset_t)CPU_prvspace + (i * PAGE_SIZE) +
2251 ((vm_offset_t)pa & PAGE_MASK);
2252 return ((void *)vaddr);
2256 * start each AP in our list
2258 static int
2259 start_all_aps(u_int boot_addr)
2261 int x, i, pg;
2262 int shift;
2263 u_char mpbiosreason;
2264 u_long mpbioswarmvec;
2265 struct mdglobaldata *gd;
2266 struct privatespace *ps;
2267 char *stack;
2268 uintptr_t kptbase;
2270 POSTCODE(START_ALL_APS_POST);
2272 /* Initialize BSP's local APIC */
2273 apic_initialize(TRUE);
2275 /* install the AP 1st level boot code */
2276 install_ap_tramp(boot_addr);
2279 /* save the current value of the warm-start vector */
2280 mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
2281 outb(CMOS_REG, BIOS_RESET);
2282 mpbiosreason = inb(CMOS_DATA);
2284 /* set up temporary P==V mapping for AP boot */
2285 /* XXX this is a hack, we should boot the AP on its own stack/PTD */
2286 kptbase = (uintptr_t)(void *)KPTphys;
2287 for (x = 0; x < NKPT; x++) {
2288 PTD[x] = (pd_entry_t)(PG_V | PG_RW |
2289 ((kptbase + x * PAGE_SIZE) & PG_FRAME));
2291 cpu_invltlb();
2293 /* start each AP */
2294 for (x = 1; x <= mp_naps; ++x) {
2296 /* This is a bit verbose, it will go away soon. */
2298 /* first page of AP's private space */
2299 pg = x * i386_btop(sizeof(struct privatespace));
2301 /* allocate new private data page(s) */
2302 gd = (struct mdglobaldata *)kmem_alloc(&kernel_map,
2303 MDGLOBALDATA_BASEALLOC_SIZE);
2304 /* wire it into the private page table page */
2305 for (i = 0; i < MDGLOBALDATA_BASEALLOC_SIZE; i += PAGE_SIZE) {
2306 SMPpt[pg + i / PAGE_SIZE] = (pt_entry_t)
2307 (PG_V | PG_RW | vtophys_pte((char *)gd + i));
2309 pg += MDGLOBALDATA_BASEALLOC_PAGES;
2311 SMPpt[pg + 0] = 0; /* *gd_CMAP1 */
2312 SMPpt[pg + 1] = 0; /* *gd_CMAP2 */
2313 SMPpt[pg + 2] = 0; /* *gd_CMAP3 */
2314 SMPpt[pg + 3] = 0; /* *gd_PMAP1 */
2316 /* allocate and set up an idle stack data page */
2317 stack = (char *)kmem_alloc(&kernel_map, UPAGES*PAGE_SIZE);
2318 for (i = 0; i < UPAGES; i++) {
2319 SMPpt[pg + 4 + i] = (pt_entry_t)
2320 (PG_V | PG_RW | vtophys_pte(PAGE_SIZE * i + stack));
2323 gd = &CPU_prvspace[x].mdglobaldata; /* official location */
2324 bzero(gd, sizeof(*gd));
2325 gd->mi.gd_prvspace = ps = &CPU_prvspace[x];
2327 /* prime data page for it to use */
2328 mi_gdinit(&gd->mi, x);
2329 cpu_gdinit(gd, x);
2330 gd->gd_CMAP1 = &SMPpt[pg + 0];
2331 gd->gd_CMAP2 = &SMPpt[pg + 1];
2332 gd->gd_CMAP3 = &SMPpt[pg + 2];
2333 gd->gd_PMAP1 = &SMPpt[pg + 3];
2334 gd->gd_CADDR1 = ps->CPAGE1;
2335 gd->gd_CADDR2 = ps->CPAGE2;
2336 gd->gd_CADDR3 = ps->CPAGE3;
2337 gd->gd_PADDR1 = (unsigned *)ps->PPAGE1;
2338 gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * (mp_naps + 1));
2339 bzero(gd->mi.gd_ipiq, sizeof(lwkt_ipiq) * (mp_naps + 1));
2341 /* setup a vector to our boot code */
2342 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
2343 *((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
2344 outb(CMOS_REG, BIOS_RESET);
2345 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */
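	/*
	 * Editorial note: writing shutdown-status value BIOS_WARM (0x0a)
	 * into CMOS register BIOS_RESET (0x0f) asks the BIOS, on the reset
	 * that follows an INIT, to jump through the warm-boot vector at
	 * 0x40:0x67 -- which the two stores above point at our trampoline
	 * (offset WARMBOOT_TARGET, segment boot_addr >> 4).
	 */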
2348 * Setup the AP boot stack
2350 bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
2351 bootAP = x;
2353 /* attempt to start the Application Processor */
2354 CHECK_INIT(99); /* setup checkpoints */
2355 if (!start_ap(gd, boot_addr)) {
2356 kprintf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
2357 CHECK_PRINT("trace"); /* show checkpoints */
2358 /* better panic as the AP may be running loose */
2359 kprintf("panic y/n? [y] ");
2360 if (cngetc() != 'n')
2361 panic("bye-bye");
2363 CHECK_PRINT("trace"); /* show checkpoints */
2365 /* record its version info */
2366 cpu_apic_versions[x] = cpu_apic_versions[0];
2369 /* set ncpus to 1 + highest logical cpu. Not all may have come up */
2370 ncpus = x;
2372 /* ncpus2 -- ncpus rounded down to the nearest power of 2 */
2373 for (shift = 0; (1 << shift) <= ncpus; ++shift)
2375 --shift;
2376 ncpus2_shift = shift;
2377 ncpus2 = 1 << shift;
2378 ncpus2_mask = ncpus2 - 1;
2380 /* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
2381 if ((1 << shift) < ncpus)
2382 ++shift;
2383 ncpus_fit = 1 << shift;
2384 ncpus_fit_mask = ncpus_fit - 1;
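	/*
	 * Worked example (editorial): with ncpus == 6 the loop above stops
	 * with shift == 2, so ncpus2 == 4 and ncpus2_mask == 3; bumping
	 * shift once more gives ncpus_fit == 8 and ncpus_fit_mask == 7.
	 */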
2386 /* build our map of 'other' CPUs */
2387 mycpu->gd_other_cpus = smp_startup_mask & ~(1 << mycpu->gd_cpuid);
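	/*
	 * Illustrative arithmetic (editorial): if four cpus came up,
	 * smp_startup_mask == 0x0f and the BSP (cpuid 0) is left with
	 * gd_other_cpus == 0x0e, i.e. every started cpu except itself.
	 */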
2388 mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * ncpus);
2389 bzero(mycpu->gd_ipiq, sizeof(lwkt_ipiq) * ncpus);
2391 /* fill in our (BSP) APIC version */
2392 cpu_apic_versions[0] = lapic.version;
2394 /* restore the warmstart vector */
2395 *(u_long *) WARMBOOT_OFF = mpbioswarmvec;
2396 outb(CMOS_REG, BIOS_RESET);
2397 outb(CMOS_DATA, mpbiosreason);
2400 * NOTE! The idlestack for the BSP was setup by locore. Finish
2401 * up, clean out the P==V mapping we did earlier.
2403 for (x = 0; x < NKPT; x++)
2404 PTD[x] = 0;
2405 pmap_set_opt();
2407 /* number of APs actually started */
2408 return ncpus - 1;
2413 * load the 1st level AP boot code into base memory.
2416 /* targets for relocation */
2417 extern void bigJump(void);
2418 extern void bootCodeSeg(void);
2419 extern void bootDataSeg(void);
2420 extern void MPentry(void);
2421 extern u_int MP_GDT;
2422 extern u_int mp_gdtbase;
2424 static void
2425 install_ap_tramp(u_int boot_addr)
2427 int x;
2428 int size = *(int *) ((u_long) & bootMP_size);
2429 u_char *src = (u_char *) ((u_long) bootMP);
2430 u_char *dst = (u_char *) boot_addr + KERNBASE;
2431 u_int boot_base = (u_int) bootMP;
2432 u_int8_t *dst8;
2433 u_int16_t *dst16;
2434 u_int32_t *dst32;
2436 POSTCODE(INSTALL_AP_TRAMP_POST);
2438 for (x = 0; x < size; ++x)
2439 *dst++ = *src++;
2442 * modify addresses in code we just moved to basemem. unfortunately we
2443 * need fairly detailed info about mpboot.s for this to work. changes
2444 * to mpboot.s might require changes here.
2447 /* boot code is located in KERNEL space */
2448 dst = (u_char *) boot_addr + KERNBASE;
2450 /* modify the lgdt arg */
2451 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
2452 *dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
2454 /* modify the ljmp target for MPentry() */
2455 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
2456 *dst32 = ((u_int) MPentry - KERNBASE);
2458 /* modify the target for boot code segment */
2459 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
2460 dst8 = (u_int8_t *) (dst16 + 1);
2461 *dst16 = (u_int) boot_addr & 0xffff;
2462 *dst8 = ((u_int) boot_addr >> 16) & 0xff;
2464 /* modify the target for boot data segment */
2465 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
2466 dst8 = (u_int8_t *) (dst16 + 1);
2467 *dst16 = (u_int) boot_addr & 0xffff;
2468 *dst8 = ((u_int) boot_addr >> 16) & 0xff;
2473 * this function starts the AP (application processor) whose APIC ID
2474 * is derived from the given globaldata ('gd').  It does quite a "song
2475 * and dance" to accomplish this.  This is necessary because of the
2476 * nuances of the different hardware we might encounter.  It ain't
2477 * pretty, but it seems to work.
2479 * NOTE: eventually an AP gets to ap_init(), which is called just
2480 * before the AP goes into the LWKT scheduler's idle loop.
2482 static int
2483 start_ap(struct mdglobaldata *gd, u_int boot_addr)
2485 int physical_cpu;
2486 int vector;
2487 u_long icr_lo, icr_hi;
2489 POSTCODE(START_AP_POST);
2491 /* get the PHYSICAL APIC ID# */
2492 physical_cpu = CPU_TO_ID(gd->mi.gd_cpuid);
2494 /* calculate the vector */
2495 vector = (boot_addr >> 12) & 0xff;
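	/*
	 * Editorial note: the STARTUP IPI vector is the physical page
	 * number of the trampoline, so e.g. boot_addr == 0x4000 gives
	 * vector 4 and the AP begins real-mode execution at physical
	 * address vector << 12 == 0x4000.
	 */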
2497 /* Make sure the target cpu sees everything */
2498 wbinvd();
2501 * first we do an INIT/RESET IPI.  This INIT IPI might be run, resetting
2502 * and running the target CPU.  OR this INIT IPI might be latched (P5
2503 * bug), the CPU waiting for the STARTUP IPI.  OR this INIT IPI might be
2504 * ignored.
2507 /* setup the address for the target AP */
2508 icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
2509 icr_hi |= (physical_cpu << 24);
2510 lapic.icr_hi = icr_hi;
2512 /* do an INIT IPI: assert RESET */
2513 icr_lo = lapic.icr_lo & 0xfff00000;
2514 lapic.icr_lo = icr_lo | 0x0000c500;
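	/*
	 * Editorial note on the magic value: 0x0000c500 encodes delivery
	 * mode INIT (0x500), level assert (0x4000) and level trigger
	 * (0x8000); the deassert below uses 0x00008500, i.e. the same
	 * thing with the assert bit cleared.
	 */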
2516 /* wait for pending status end */
2517 while (lapic.icr_lo & APIC_DELSTAT_MASK)
2518 /* spin */ ;
2520 /* do an INIT IPI: deassert RESET */
2521 lapic.icr_lo = icr_lo | 0x00008500;
2523 /* wait for pending status end */
2524 u_sleep(10000); /* wait ~10mS */
2525 while (lapic.icr_lo & APIC_DELSTAT_MASK)
2526 /* spin */ ;
2529 * next we do a STARTUP IPI: the previous INIT IPI might still be
2530 * latched (P5 bug); this 1st STARTUP would then terminate
2531 * immediately, and the previously started INIT IPI would continue.  OR
2532 * the previous INIT IPI has already run, and this STARTUP IPI will
2533 * run.  OR the previous INIT IPI was ignored, and this STARTUP IPI
2534 * will run.
2537 /* do a STARTUP IPI */
2538 lapic.icr_lo = icr_lo | 0x00000600 | vector;
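	/*
	 * Editorial note: 0x00000600 selects the STARTUP delivery mode,
	 * with the trampoline page number computed above in the low byte.
	 */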
2539 while (lapic.icr_lo & APIC_DELSTAT_MASK)
2540 /* spin */ ;
2541 u_sleep(200); /* wait ~200uS */
2544 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
2545 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR
2546 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
2547 * recognized after hardware RESET or INIT IPI.
2550 lapic.icr_lo = icr_lo | 0x00000600 | vector;
2551 while (lapic.icr_lo & APIC_DELSTAT_MASK)
2552 /* spin */ ;
2553 u_sleep(200); /* wait ~200uS */
2555 /* wait for it to start, see ap_init() */
2556 set_apic_timer(5000000);/* == 5 seconds */
2557 while (read_apic_timer()) {
2558 if (smp_startup_mask & (1 << gd->mi.gd_cpuid))
2559 return 1; /* return SUCCESS */
2561 return 0; /* return FAILURE */
2566 * Lazy flush the TLB on all other CPUs.  DEPRECATED.
2568 * If for some reason we were unable to start all cpus we cannot safely
2569 * use broadcast IPIs.
2571 void
2572 smp_invltlb(void)
2574 #ifdef SMP
2575 if (smp_startup_mask == smp_active_mask) {
2576 all_but_self_ipi(XINVLTLB_OFFSET);
2577 } else {
2578 selected_apic_ipi(smp_active_mask, XINVLTLB_OFFSET,
2579 APIC_DELMODE_FIXED);
2581 #endif
2585 * When called, the executing CPU will send an IPI to all other CPUs
2586 * requesting that they halt execution.
2588 * Usually (but not necessarily) called with 'other_cpus' as its arg.
2590 * - Signals all CPUs in map to stop.
2591 * - Waits for each to stop.
2593 * Returns:
2594 * -1: error
2595 * 0: NA
2596 * 1: ok
2598 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
2599 * from executing at same time.
2602 stop_cpus(u_int map)
2604 map &= smp_active_mask;
2606 /* send the Xcpustop IPI to all CPUs in map */
2607 selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
2609 while ((stopped_cpus & map) != map)
2610 /* spin */ ;
2612 return 1;
2617 * Called by a CPU to restart stopped CPUs.
2619 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
2621 * - Signals all CPUs in map to restart.
2622 * - Waits for each to restart.
2624 * Returns:
2625 * -1: error
2626 * 0: NA
2627 * 1: ok
2630 restart_cpus(u_int map)
2632 /* signal other cpus to restart */
2633 started_cpus = map & smp_active_mask;
2635 while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */
2636 /* spin */ ;
2638 return 1;
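/*
 * Usage sketch (editorial, hypothetical): the usual pairing, as the
 * "usually called with" notes above suggest, is
 *
 *	stop_cpus(mycpu->gd_other_cpus);
 *	(inspect or modify state while the other cpus spin)
 *	restart_cpus(stopped_cpus);
 *
 * with the caller responsible for serialization; see the XXX FIXME
 * note on stop_cpus() above.
 */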
2642 * This is called once the mpboot code has gotten us properly relocated
2643 * and the MMU turned on, etc. ap_init() is actually the idle thread,
2644 * and when it returns the scheduler will call the real cpu_idle() main
2645 * loop for the idlethread. Interrupts are disabled on entry and should
2646 * remain disabled at return.
2648 void
2649 ap_init(void)
2651 u_int apic_id;
2654 * Adjust smp_startup_mask to signal the BSP that we have started
2655 * up successfully. Note that we do not yet hold the BGL. The BSP
2656 * is waiting for our signal.
2658 * We can't set our bit in smp_active_mask yet because we are holding
2659 * interrupts physically disabled and remote cpus could deadlock
2660 * trying to send us an IPI.
2662 smp_startup_mask |= 1 << mycpu->gd_cpuid;
2663 cpu_mfence();
2666 * Interlock for finalization. Wait until mp_finish is non-zero,
2667 * then get the MP lock.
2669 * Note: We are in a critical section.
2671 * Note: We have to synchronize td_mpcount to our desired MP state
2672 * before calling cpu_try_mplock().
2674 * Note: we are the idle thread, we can only spin.
2676 * Note: The load fence is memory volatile and prevents the compiler
2677 * from improperly caching mp_finish, and the cpu from improperly
2678 * caching it.
2680 while (mp_finish == 0)
2681 cpu_lfence();
2682 ++curthread->td_mpcount;
2683 while (cpu_try_mplock() == 0)
2686 if (cpu_feature & CPUID_TSC) {
2688 * The BSP is constantly updating tsc0_offset, figure out the
2689 * relative difference to synchronize ktrdump.
2691 tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
2694 /* BSP may have changed PTD while we're waiting for the lock */
2695 cpu_invltlb();
2697 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2698 lidt(&r_idt);
2699 #endif
2701 /* Build our map of 'other' CPUs. */
2702 mycpu->gd_other_cpus = smp_startup_mask & ~(1 << mycpu->gd_cpuid);
2704 kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);
2706 /* A quick check from sanity claus */
2707 apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
2708 if (mycpu->gd_cpuid != apic_id) {
2709 kprintf("SMP: cpuid = %d\n", mycpu->gd_cpuid);
2710 kprintf("SMP: apic_id = %d\n", apic_id);
2711 kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
2712 panic("cpuid mismatch! boom!!");
2715 /* Initialize AP's local APIC for irq's */
2716 apic_initialize(FALSE);
2718 /* Set memory range attributes for this CPU to match the BSP */
2719 mem_range_AP_init();
2722 * Once we go active we must process any IPIQ messages that may
2723 * have been queued, because no actual IPI will occur until we
2724 * set our bit in the smp_active_mask. If we don't the IPI
2725 * message interlock could be left set which would also prevent
2726 * further IPIs.
2728 * The idle loop doesn't expect the BGL to be held, and while
2729 * lwkt_switch() normally cleans things up, this is a special case
2730 * because we are returning almost directly into the idle loop.
2732 * The idle thread is never placed on the runq, make sure
2733 * nothing we've done put it there.
2735 KKASSERT(curthread->td_mpcount == 1);
2736 smp_active_mask |= 1 << mycpu->gd_cpuid;
2739 * Enable interrupts here. idle_restore will also do it, but
2740 * doing it here lets us clean up any strays that got posted to
2741 * the CPU during the AP boot while we are still in a critical
2742 * section.
2744 __asm __volatile("sti; pause; pause"::);
2745 mdcpu->gd_fpending = 0;
2746 mdcpu->gd_ipending = 0;
2748 initclocks_pcpu(); /* clock interrupts (via IPIs) */
2749 lwkt_process_ipiq();
2752 * Releasing the mp lock lets the BSP finish up the SMP init
2754 rel_mplock();
2755 KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
2759 * Get SMP fully working before we start initializing devices.
2761 static
2762 void
2763 ap_finish(void)
2765 mp_finish = 1;
2766 if (bootverbose)
2767 kprintf("Finish MP startup\n");
2768 if (cpu_feature & CPUID_TSC)
2769 tsc0_offset = rdtsc();
2770 tsc_offsets[0] = 0;
2771 rel_mplock();
2772 while (smp_active_mask != smp_startup_mask) {
2773 cpu_lfence();
2774 if (cpu_feature & CPUID_TSC)
2775 tsc0_offset = rdtsc();
2777 while (try_mplock() == 0)
2779 if (bootverbose)
2780 kprintf("Active CPU Mask: %08x\n", smp_active_mask);
2783 SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL)
2785 void
2786 cpu_send_ipiq(int dcpu)
2788 if ((1 << dcpu) & smp_active_mask)
2789 single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
2792 #if 0 /* single_apic_ipi_passive() not working yet */
2794 * Returns 0 on failure, 1 on success
2797 cpu_send_ipiq_passive(int dcpu)
2799 int r = 0;
2800 if ((1 << dcpu) & smp_active_mask) {
2801 r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
2802 APIC_DELMODE_FIXED);
2804 return(r);
2806 #endif
2808 struct mptable_lapic_cbarg1 {
2809 int cpu_count;
2810 u_int id_mask;
2813 static int
2814 mptable_lapic_pass1_callback(void *xarg, const void *pos, int type)
2816 const struct PROCENTRY *ent;
2817 struct mptable_lapic_cbarg1 *arg = xarg;
2819 if (type != 0)
2820 return 0;
2821 ent = pos;
2823 if ((ent->cpu_flags & PROCENTRY_FLAG_EN) == 0)
2824 return 0;
2826 arg->cpu_count++;
2827 arg->id_mask |= 1 << ent->apic_id;
2828 return 0;
2831 struct mptable_lapic_cbarg2 {
2832 int cpu;
2833 int found_bsp;
2836 static int
2837 mptable_lapic_pass2_callback(void *xarg, const void *pos, int type)
2839 const struct PROCENTRY *ent;
2840 struct mptable_lapic_cbarg2 *arg = xarg;
2842 if (type != 0)
2843 return 0;
2844 ent = pos;
2846 if (ent->cpu_flags & PROCENTRY_FLAG_BP) {
2847 KKASSERT(!arg->found_bsp);
2848 arg->found_bsp = 1;
2851 if (processor_entry(ent, arg->cpu))
2852 arg->cpu++;
2854 if (need_hyperthreading_fixup) {
2855 struct PROCENTRY proc;
2856 int i;
2859 * Create fake mptable processor entries
2860 * and feed them to processor_entry() to
2861 * enumerate the logical CPUs.
2863 bzero(&proc, sizeof(proc));
2864 proc.type = 0;
2865 proc.cpu_flags = PROCENTRY_FLAG_EN;
2866 proc.apic_id = ent->apic_id;
2868 for (i = 1; i < logical_cpus; i++) {
2869 proc.apic_id++;
2870 processor_entry(&proc, arg->cpu);
2871 logical_cpus_mask |= (1 << arg->cpu);
2872 arg->cpu++;
2875 return 0;
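/*
 * Illustrative example (editorial): if the mptable lists one enabled
 * package with apic_id 0 and the hyperthreading fixup determined
 * logical_cpus == 2, the loop above synthesizes one fake entry with
 * apic_id 1 and registers it as the next logical cpu.
 */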
2878 static void
2879 mptable_lapic_default(void)
2881 int ap_apicid, bsp_apicid;
2883 mp_naps = 1; /* exclude BSP */
2885 /* Map local apic before the id field is accessed */
2886 lapic_init(DEFAULT_APIC_BASE);
2888 bsp_apicid = APIC_ID(lapic.id);
2889 ap_apicid = (bsp_apicid == 0) ? 1 : 0;
2891 /* BSP */
2892 mp_set_cpuids(0, bsp_apicid);
2893 /* one and only AP */
2894 mp_set_cpuids(1, ap_apicid);
2898 * Configure:
2899 * cpu_apic_address (common to all CPUs)
2900 * mp_naps
2901 * need_hyperthreading_fixup
2902 * logical_cpus
2903 * logical_cpus_mask
2904 * ID_TO_CPU(N), APIC ID to logical CPU table
2905 * CPU_TO_ID(N), logical CPU to APIC ID table
2907 static void
2908 mptable_lapic_enumerate(struct mptable_pos *mpt)
2910 struct mptable_lapic_cbarg1 arg1;
2911 struct mptable_lapic_cbarg2 arg2;
2912 mpcth_t cth;
2913 int error;
2914 vm_offset_t lapic_addr;
2916 KKASSERT(mpt->mp_fps != NULL);
2919 * Check for use of 'default' configuration
2921 if (mpt->mp_fps->mpfb1 != 0) {
2922 mptable_lapic_default();
2923 return;
2926 cth = mpt->mp_cth;
2927 KKASSERT(cth != NULL);
2929 /* Save local apic address */
2930 lapic_addr = (vm_offset_t)cth->apic_address;
2931 KKASSERT(lapic_addr != 0);
2934 * Find out how many CPUs we have
2936 bzero(&arg1, sizeof(arg1));
2937 error = mptable_iterate_entries(cth,
2938 mptable_lapic_pass1_callback, &arg1);
2939 if (error)
2940 panic("mptable_iterate_entries(lapic_pass1) failed\n");
2942 KKASSERT(arg1.cpu_count != 0);
2943 mp_naps = arg1.cpu_count;
2945 /* See if we need to fixup HT logical CPUs. */
2946 mptable_hyperthread_fixup(arg1.id_mask);
2948 /* Qualify the numbers again, after hyperthreading fixup */
2949 if (mp_naps > MAXCPU) {
2950 kprintf("Warning: only using %d of %d available CPUs!\n",
2951 MAXCPU, mp_naps);
2952 mp_naps = MAXCPU;
2955 --mp_naps; /* subtract the BSP */
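	/*
	 * Worked example (editorial): a 4-cpu box enumerated above with
	 * arg1.cpu_count == 4 (and no MAXCPU clamping) leaves mp_naps == 3,
	 * i.e. the number of APs excluding the BSP.
	 */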
2958 * Link logical CPU id to local apic id
2960 bzero(&arg2, sizeof(arg2));
2961 arg2.cpu = 1;
2963 error = mptable_iterate_entries(cth,
2964 mptable_lapic_pass2_callback, &arg2);
2965 if (error)
2966 panic("mptable_iterate_entries(lapic_pass2) failed\n");
2967 KKASSERT(arg2.found_bsp);
2969 /* Map local apic */
2970 lapic_init(lapic_addr);
2973 static void
2974 lapic_init(vm_offset_t lapic_addr)
2976 /* Local apic is mapped on last page */
2977 SMPpt[NPTEPG - 1] = (pt_entry_t)(PG_V | PG_RW | PG_N |
2978 pmap_get_pgeflag() | (lapic_addr & PG_FRAME));
2980 /* Just for printing */
2981 cpu_apic_address = lapic_addr;