/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/cons.h>	/* cngetc() */
#include <sys/machintr.h>
#include <sys/cpu_topology.h>

#include <sys/mplock2.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine_base/apic/lapic.h>
#include <machine_base/apic/ioapic.h>
#include <machine_base/acpica/acpi_md_cpu.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>
#include <machine/pmap_inval.h>
#include <machine/clock.h>

#include <machine/md_var.h>		/* setidt() */
#include <machine_base/icu/icu.h>	/* IPIs */
#include <machine_base/icu/icu_var.h>
#include <machine_base/apic/ioapic_abi.h>
#include <machine/intr_machdep.h>	/* IPIs */

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

#define INVLPG_TIMEOUT_DEFAULT	10
#define INVLPG_TIMEOUT_VM	60

/*
 * this code MUST be enabled here and in mpboot.s.
 * it follows the very early stages of AP boot by placing values in CMOS ram.
 * it NORMALLY will never be needed and thus the primitive method for enabling.
 */
#if defined(CHECK_POINTS)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D)				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S)				\
	kprintf("%s: %d, %d, %d, %d, %d, %d\n",	\
	    (S),				\
	    CHECK_READ(0x34),			\
	    CHECK_READ(0x35),			\
	    CHECK_READ(0x36),			\
	    CHECK_READ(0x37),			\
	    CHECK_READ(0x38),			\
	    CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)

#endif				/* CHECK_POINTS */
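
/*
 * Usage sketch (illustrative only): with CHECK_POINTS enabled, mpboot.s
 * writes progress codes into CMOS bytes 0x34-0x39 as the AP advances, so
 * a hung AP leaves a trail the BSP can dump, e.g.:
 *
 *	CHECK_INIT(99);			// preset all checkpoint bytes
 *	if (!start_ap(gd, boot_addr, smibest))
 *		CHECK_PRINT("trace");	// show how far the AP got
 */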

/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19

/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;

/** XXX FIXME: what system files declare these??? */
extern int	_udatasel;

extern int64_t tsc_offsets[];

/* AP uses this during bootstrap.  Do not staticize.  */
struct pcb stoppcbs[MAXCPU];

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int	boot_address;
static int	mp_finish;
static int	mp_finish_lapic;

static int	start_all_aps(u_int boot_addr);
static void	install_ap_tramp(u_int boot_addr);
static int	start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest);
static int	smitest(void);
static void	mp_bsp_simple_setup(void);

/* which cpus have been started */
__read_mostly static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus have had their lapic initialized */
__read_mostly static cpumask_t smp_lapic_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus are ready for IPIs etc? */
__read_mostly cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
__read_mostly cpumask_t smp_finalize_mask = CPUMASK_INITIALIZER_ONLYONE;

SYSCTL_OPAQUE(_machdep, OID_AUTO, smp_active, CTLFLAG_RD,
	      &smp_active_mask, sizeof(smp_active_mask), "LU", "");

static u_int	bootMP_size;
__read_mostly static u_int report_invlpg_src;
SYSCTL_INT(_machdep, OID_AUTO, report_invlpg_src, CTLFLAG_RW,
	   &report_invlpg_src, 0, "");
__read_mostly static u_int report_invltlb_src;
SYSCTL_INT(_machdep, OID_AUTO, report_invltlb_src, CTLFLAG_RW,
	   &report_invltlb_src, 0, "");
__read_mostly static int optimized_invltlb;
SYSCTL_INT(_machdep, OID_AUTO, optimized_invltlb, CTLFLAG_RW,
	   &optimized_invltlb, 0, "");
__read_mostly static int all_but_self_ipi_enable = 1;
SYSCTL_INT(_machdep, OID_AUTO, all_but_self_ipi_enable, CTLFLAG_RW,
	   &all_but_self_ipi_enable, 0, "");
__read_mostly static int invlpg_timeout = INVLPG_TIMEOUT_DEFAULT;
SYSCTL_INT(_machdep, OID_AUTO, invlpg_timeout, CTLFLAG_RW,
	   &invlpg_timeout, 0, "");

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
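
/*
 * Worked example (illustrative numbers only): with 639KB of base memory,
 * trunc_page(639 * 1024) = 0x9f000.  If the trampoline does not fit in
 * the remaining 0xc00 bytes, boot_address drops one page to 0x9e000, and
 * the three trampoline page table pages then occupy 0x9b000-0x9dfff,
 * immediately below the trampoline itself.
 */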

/*
 * Print various information about the SMP system hardware and setup.
 */
void
mp_announce(void)
{
	int	x;

	POSTCODE(MP_ANNOUNCE_POST);

	kprintf("DragonFly/MP: Multiprocessor motherboard\n");
	kprintf(" cpu0 (BSP): apic id: %2d\n", CPUID_TO_APICID(0));
	for (x = 1; x <= naps; ++x)
		kprintf(" cpu%d (AP):  apic id: %2d\n", x, CPUID_TO_APICID(x));

	if (!ioapic_enable)
		kprintf(" Warning: APIC I/O disabled\n");
}

/*
 * AP cpu's call this to sync up protected mode.
 *
 * WARNING! %gs is not set up on entry.  This routine sets up %gs.
 */
void
init_secondary(void)
{
	int	gsel_tss;
	int	x, myid = bootAP;
	u_int64_t msr, cr0;
	struct mdglobaldata *md;
	struct privatespace *ps;
	struct user_segment_descriptor *gdt;

	ps = CPU_prvspace[myid];
	gdt = ps->mdglobaldata.gd_gdt;

	gdt_segs[GPROC0_SEL].ssd_base = (long)&ps->common_tss;
	ps->mdglobaldata.mi.gd_prvspace = ps;

	/* We fill the 32-bit segment descriptors */
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[x]);
	}
	/* And now a 64-bit one */
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
		   (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

	r_gdt.rd_limit = MAXGDT_LIMIT - 1;
	r_gdt.rd_base = (long)(intptr_t)gdt;
	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */
	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)ps);
	wrmsr(MSR_KGSBASE, 0);		/* XXX User value while we're in the kernel */

	lidt(&r_idt_arr[mdcpu->mi.gd_cpuid]);

	mdcpu->gd_currentldt = _default_ldt;

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[GPROC0_SEL].sd_type = SDT_SYSTSS;

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/

	/*
	 * TSS entry point for interrupts, traps, and exceptions
	 * (sans NMI).  This will always go to near the top of the pcpu
	 * trampoline area.  Hardware-pushed data will be copied into
	 * the trap-frame on entry, and (if necessary) returned to the
	 * trampoline on exit.
	 *
	 * We store some pcb data for the trampoline code above the
	 * stack the cpu hw pushes into, and arrange things so the
	 * address of tr_pcb_rsp is the same as the desired top of
	 * stack.
	 */
	ps->common_tss.tss_rsp0 = (register_t)&ps->trampoline.tr_pcb_rsp;
	ps->trampoline.tr_pcb_rsp = ps->common_tss.tss_rsp0;
	ps->trampoline.tr_pcb_gs_kernel = (register_t)md;
	ps->trampoline.tr_pcb_cr3 = KPML4phys;	/* adj to user cr3 live */
	ps->dbltramp.tr_pcb_gs_kernel = (register_t)md;
	ps->dbltramp.tr_pcb_cr3 = KPML4phys;
	ps->dbgtramp.tr_pcb_gs_kernel = (register_t)md;
	ps->dbgtramp.tr_pcb_cr3 = KPML4phys;

	ps->common_tss.tss_ioopt = (sizeof ps->common_tss) << 16;

	md->gd_tss_gdt = &gdt[GPROC0_SEL];
	md->gd_common_tssd = *md->gd_tss_gdt;

	/* double fault stack */
	ps->common_tss.tss_ist1 = (register_t)&ps->dbltramp.tr_pcb_rsp;
	ps->common_tss.tss_ist2 = (register_t)&ps->dbgtramp.tr_pcb_rsp;

	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 *	Set by mpboot.s: CR0_PG, CR0_PE
	 *	Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_IOPL|PSL_AC);

	pmap_set_opt();		/* PSE/4MB pages, etc */
	pmap_init_pat();	/* Page Attribute Table */

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu(myid);

	/* set up FPU state on the AP */
	npxinit();

	/* If BSP is in the X2APIC mode, put the AP into the X2APIC mode. */
	if (x2apic_enable)
		lapic_x2apic_enter(FALSE);

	/* disable the APIC, just to be SURE */
	LAPIC_WRITE(svr, (LAPIC_READ(svr) & ~APIC_SVR_ENABLE));
}

/*******************************************************************
 * local functions and data
 */

/*
 * Start the SMP system
 */
static void
mp_start_aps(void *dummy __unused)
{
	/* start each Application Processor */
	start_all_aps(boot_address);

	mp_bsp_simple_setup();
}

SYSINIT(startaps, SI_BOOT2_START_APS, SI_ORDER_FIRST, mp_start_aps, NULL);

/*
 * start each AP in our list
 */
static int
start_all_aps(u_int boot_addr)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	int	pssize;
	int	x, i;
	int	shift;
	int	smicount;
	int	smibest;
	int	smilast;
	u_char	mpbiosreason;
	u_long	mpbioswarmvec;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	size_t	ipiq_size;

	POSTCODE(START_ALL_APS_POST);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	cpu_invlpg((void *)va);		/* JG XXX */
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= kernel_pmap->pmap_bits[PG_V_IDX] |
			  kernel_pmap->pmap_bits[PG_RW_IDX] |
			  kernel_pmap->pmap_bits[PG_U_IDX];

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= kernel_pmap->pmap_bits[PG_V_IDX] |
			  kernel_pmap->pmap_bits[PG_RW_IDX] |
			  kernel_pmap->pmap_bits[PG_U_IDX];

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= kernel_pmap->pmap_bits[PG_V_IDX] |
			  kernel_pmap->pmap_bits[PG_RW_IDX] |
			  kernel_pmap->pmap_bits[PG_PS_IDX] |
			  kernel_pmap->pmap_bits[PG_U_IDX];
	}
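
	/*
	 * Sketch of the resulting layout: 512 PDE slots * 2MB each map
	 * exactly 1GB, and because every PML4/PDP slot aliases the same
	 * lower-level page, that first 1GB of physical memory appears at
	 * every 1GB-aligned virtual address.  The trampoline can thus run
	 * identity-mapped before switching to the real kernel page tables.
	 */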

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/*
	 * If we have a TSC we can figure out the SMI interrupt rate.
	 * The SMI does not necessarily use a constant rate.  Spend
	 * up to 250ms trying to figure it out.
	 */
	smibest = 0;
	if (cpu_feature & CPUID_TSC) {
		set_apic_timer(275000);
		smilast = read_apic_timer();
		for (x = 0; x < 20 && read_apic_timer(); ++x) {
			smicount = smitest();
			if (smibest == 0 || smilast - smicount < smibest)
				smibest = smilast - smicount;
			smilast = smicount;
		}
		if (smibest > 250000)
			smibest = 0;
	}
	if (smibest)
		kprintf("SMI Frequency (worst case): %d Hz (%d us)\n",
			1000000 / smibest, smibest);

	/*
	 * This is nasty but if we are a guest in a virtual machine,
	 * give the smpinvl synchronization code up to 60 seconds
	 * to complete instead of the usual 10 seconds.
	 */
	if (vmm_guest != VMM_GUEST_NONE)
		invlpg_timeout = INVLPG_TIMEOUT_VM;

	/* start each AP */
	for (x = 1; x <= naps; ++x) {
		/* This is a bit verbose, it will go away soon.  */

		pssize = sizeof(struct privatespace);
		ps = (void *)kmem_alloc3(kernel_map, pssize, VM_SUBSYS_GD,
					 KM_CPU(x));
		CPU_prvspace[x] = ps;
		gd = &ps->mdglobaldata;
		gd->mi.gd_prvspace = ps;
		gd->gd_gdt = (void *)
			kmem_alloc3(kernel_map, MAXGDT_LIMIT, VM_SUBSYS_GD,
				    KM_CPU(x));
		bzero(gd->gd_gdt, MAXGDT_LIMIT);

		kprintf("ps %d %p %d\n", x, ps, pssize);

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);

		ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
		gd->mi.gd_ipiq = (void *)kmem_alloc3(kernel_map, ipiq_size,
						     VM_SUBSYS_IPIQ, KM_CPU(x));
		bzero(gd->mi.gd_ipiq, ipiq_size);

		gd->gd_acpi_id = CPUID_TO_ACPIID(gd->mi.gd_cpuid);

		/* initialize arc4random. */
		arc4_init_pcpu(x);

		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE - PAGE_SIZE];
		bootAP = x;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(gd, boot_addr, smibest)) {
			kprintf("\nAP #%d (PHY# %d) failed!\n",
				x, CPUID_TO_APICID(x));
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			kprintf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}

		CHECK_PRINT("trace");	/* show checkpoints */
	}

	/* set ncpus to 1 + highest logical cpu.  Not all may have come up */
	ncpus = x;

	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;
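
	/*
	 * Example (illustrative): with ncpus == 6 the loop leaves shift == 2
	 * (4 <= 6), the correction bumps it to 3, so ncpus_fit == 8 and
	 * ncpus_fit_mask == 7.
	 */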

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	malloc_reinit_ncpus();

	gd = (struct mdglobaldata *)mycpu;
	gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc3(kernel_map, ipiq_size,
					     VM_SUBSYS_IPIQ, KM_CPU(0));
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random. */
	arc4_init_pcpu(0);

	/* restore the warmstart vector */
	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/*
	 * NOTE!  The idlestack for the BSP was setup by locore.  Finish
	 * up, clean out the P==V mapping we did earlier.
	 */

	/*
	 * Wait for all APs to finish initializing their LAPIC.
	 */
	mp_finish_lapic = 1;
	kprintf("SMP: Waiting APs LAPIC initialization\n");
	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
	tsc_offsets[0] = 0;
	while (CPUMASK_CMPMASKNEQ(smp_lapic_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
		if (cpu_feature & CPUID_TSC)
			tsc0_offset = rdtsc();
	}
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* number of APs actually started */
	return ncpus - 1;
}

/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

static void
install_ap_tramp(u_int boot_addr)
{
	int     x;
	int     size = *(int *) ((u_long) & bootMP_size);
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) boot_addr + KERNBASE;
	u_int   boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	POSTCODE(INSTALL_AP_TRAMP_POST);

	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem. unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) boot_addr + KERNBASE;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It ain't pretty,
 * but it seems to work.
 *
 * NOTE: eventually an AP gets to ap_init(), which is called just
 *	 before the AP goes into the LWKT scheduler's idle loop.
 */
static int
start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest)
{
	int	physical_cpu;
	int	vector;

	POSTCODE(START_AP_POST);

	/* get the PHYSICAL APIC ID# */
	physical_cpu = CPUID_TO_APICID(gd->mi.gd_cpuid);

	/* calculate the vector */
	vector = (boot_addr >> 12) & 0xff;

	/* We don't want anything interfering */
	cpu_disable_intr();

	/* Make sure the target cpu sees everything */
	wbinvd();

	/*
	 * Try to detect when a SMI has occurred, wait up to 200ms.
	 *
	 * If a SMI occurs during an AP reset but before we issue
	 * the STARTUP command, the AP may brick.  To work around
	 * this problem we hold off doing the AP startup until
	 * after we have detected the SMI.  Hopefully another SMI
	 * will not occur before we finish the AP startup.
	 *
	 * Retries don't seem to help.  SMIs have a window of opportunity
	 * and if USB->legacy keyboard emulation is enabled in the BIOS
	 * the interrupt rate can be quite high.
	 *
	 * NOTE: Don't worry about the L1 cache load, it might bloat
	 *	 ldelta a little but ndelta will be so huge when the SMI
	 *	 occurs the detection logic will still work fine.
	 */
	if (smibest) {
		set_apic_timer(200000);
		smitest();
	}

	/*
	 * first we do an INIT/RESET IPI this INIT IPI might be run, reseting
	 * and running the target CPU. OR this INIT IPI might be latched (P5
	 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be
	 * ignored.
	 *
	 * see apic/apicreg.h for icr bit definitions.
	 *
	 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH.
	 */

	/*
	 * Do an INIT IPI: assert RESET
	 *
	 * Use edge triggered mode to assert INIT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT |
	    APIC_DELMODE_INIT);

	/*
	 * The spec calls for a 10ms delay but we may have to use a
	 * MUCH lower delay to avoid bricking an AP due to a fast SMI
	 * interrupt.  We have other loops here too and dividing by 2
	 * doesn't seem to be enough even after subtracting 350us,
	 * so we divide by 4.
	 *
	 * Our minimum delay is 150uS, maximum is 10ms.  If no SMI
	 * interrupt was detected we use the full 10ms.
	 */
	if (smibest == 0)
		u_sleep(10000);
	else if (smibest < 150 * 4 + 350)
		u_sleep(150);
	else if ((smibest - 350) / 4 < 10000)
		u_sleep((smibest - 350) / 4);
	else
		u_sleep(10000);

	/*
	 * Do an INIT IPI: deassert RESET
	 *
	 * Use level triggered mode to deassert.  It is unclear
	 * why we need to do this.
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT |
	    APIC_DELMODE_INIT);
	u_sleep(150);				/* wait 150us */

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched, (P5 bug) this 1st STARTUP would then terminate
	 * immediately, and the previously started INIT IPI would continue. OR
	 * the previous INIT IPI has already run. and this STARTUP IPI will
	 * run. OR the previous INIT IPI was ignored. and this STARTUP IPI
	 * will run.
	 *
	 * XXX set APIC_LEVEL_ASSERT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_DELMODE_STARTUP |
	    vector);
	u_sleep(200);		/* wait ~200uS */

	/*
	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
	 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR
	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 *
	 * XXX set APIC_LEVEL_ASSERT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_DELMODE_STARTUP |
	    vector);

	/* Resume normal operation */
	cpu_enable_intr();

	/* wait for it to start, see ap_init() */
	set_apic_timer(5000000);		/* == 5 seconds */
	while (read_apic_timer()) {
		if (CPUMASK_TESTBIT(smp_startup_mask, gd->mi.gd_cpuid))
			return 1;	/* return SUCCESS */
	}

	return 0;		/* return FAILURE */
}

/*
 * Measure the delta between consecutive TSC reads while the apic timer
 * runs down, returning the remaining apic timer count.  A large spike
 * in the delta indicates that an SMI occurred.
 */
static int
smitest(void)
{
	int64_t	ltsc;
	int64_t	ntsc;
	int64_t	ldelta;
	int64_t	ndelta;
	int	count;

	ldelta = 0;
	ndelta = 0;
	while (read_apic_timer()) {
		ltsc = rdtsc();
		for (count = 0; count < 100; ++count)
			ntsc = rdtsc();	/* force loop to occur */
		ndelta = ntsc - ltsc;
		if (ldelta) {
			if (ndelta > ldelta * 2)
				break;
		}
		ldelta = ntsc - ltsc;
	}
	return(read_apic_timer());
}
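
/*
 * Example of the heuristic above (illustrative numbers): 100 back-to-back
 * rdtsc() reads normally take a few thousand cycles, so ldelta settles
 * around that value.  If an SMI fires inside the loop its handler may
 * consume hundreds of thousands of cycles, making ndelta far exceed
 * ldelta * 2 and tripping the break.  The caller then derives the SMI
 * period from how much apic timer budget was consumed.
 */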

/*
 * Synchronously flush the TLB on all other CPU's.  The current cpu's
 * TLB is not flushed.  If the caller wishes to flush the current cpu's
 * TLB the caller must call cpu_invltlb() in addition to smp_invltlb().
 *
 * This routine may be called concurrently from multiple cpus.  When this
 * happens, smp_invltlb() can wind up sticking around in the confirmation
 * while() loop at the end as additional cpus are added to the global
 * cpumask, until they are acknowledged by another IPI.
 *
 * NOTE: If for some reason we were unable to start all cpus we cannot
 *	 safely use broadcast IPIs.
 */

cpumask_t smp_smurf_mask;
static cpumask_t smp_invltlb_mask;
cpumask_t smp_in_mask;
cpumask_t smp_invmask;
extern cpumask_t smp_idleinvl_mask;
extern cpumask_t smp_idleinvl_reqs;

/*
 * Atomically OR bits in *mask into smp_smurf_mask.  Adjust *mask to remove
 * bits that do not need to be IPId.  These bits are still part of the command,
 * but the target cpus have already been signalled and do not need to be
 * signalled again.
 */
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

static void
smp_smurf_fetchset(cpumask_t *mask)
{
	cpumask_t omask;
	int i;
	__uint64_t obits;
	__uint64_t nbits;

	i = 0;
	while (i < CPUMASK_ELEMENTS) {
		obits = smp_smurf_mask.ary[i];
		cpu_ccfence();
		nbits = obits | mask->ary[i];
		if (atomic_cmpset_long(&smp_smurf_mask.ary[i], obits, nbits)) {
			omask.ary[i] = obits;
			++i;
		}
	}
	CPUMASK_NANDMASK(*mask, omask);
}
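
/*
 * Worked example (illustrative, single mask element): if smp_smurf_mask
 * holds 0b0011 and *mask is 0b0110, the cmpset publishes 0b0111 and
 * omask records the old 0b0011.  The final NAND leaves *mask = 0b0100,
 * i.e. only the one cpu that was not already signalled gets an IPI.
 */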

/*
 * This is a mechanism which guarantees that cpu_invltlb() will be executed
 * on idle cpus without having to signal or wake them up.  The invltlb will be
 * executed when they wake up, prior to any scheduling or interrupt thread.
 *
 * (*mask) is modified to remove the cpus we successfully negotiate this
 * function with.  This function may only be used with semi-synchronous
 * commands (typically invltlb's or semi-synchronous invalidations which
 * are usually associated only with kernel memory).
 */
void
smp_smurf_idleinvlclr(cpumask_t *mask)
{
	if (optimized_invltlb) {
		ATOMIC_CPUMASK_ORMASK(smp_idleinvl_reqs, *mask);
		/* cpu_lfence() not needed */
		CPUMASK_NANDMASK(*mask, smp_idleinvl_mask);
	}
}
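
/*
 * Example (illustrative): with *mask = 0b1110 and smp_idleinvl_mask =
 * 0b0100 (cpu2 idle), the request bits are posted for all three cpus but
 * *mask is reduced to 0b1010; cpu2 is never IPId and instead runs
 * cpu_invltlb() itself when it wakes up and sees its request bit.
 */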

/*
 * Issue cpu_invltlb() across all cpus except the current cpu.
 *
 * This function will arrange to avoid idle cpus, but still guarantee that
 * invltlb is run on them when they wake up prior to any scheduling or
 * interrupt thread.
 */
void
smp_invltlb(void)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t mask;
	unsigned long rflags;
	tsc_uclock_t tsc_base = rdtsc();
	int repeats = 0;

	if (report_invltlb_src > 0) {
		if (--report_invltlb_src <= 0)
			print_backtrace(8);
	}

	/*
	 * Disallow normal interrupts, set all active cpus except our own
	 * in the global smp_invltlb_mask.
	 */
	++md->mi.gd_cnt.v_smpinvltlb;
	crit_enter_gd(&md->mi);

	/*
	 * Bits we want to set in smp_invltlb_mask.  We do not want to signal
	 * our own cpu.  Also try to remove bits associated with idle cpus
	 * that we can flag for auto-invltlb.
	 */
	mask = smp_active_mask;
	CPUMASK_NANDBIT(mask, md->mi.gd_cpuid);
	smp_smurf_idleinvlclr(&mask);

	rflags = read_rflags();
	cpu_disable_intr();
	ATOMIC_CPUMASK_ORMASK(smp_invltlb_mask, mask);

	/*
	 * IPI non-idle cpus represented by mask.  The omask calculation
	 * removes cpus from the mask which already have a Xinvltlb IPI
	 * pending (avoid double-queueing the IPI).
	 *
	 * We must disable real interrupts when setting the smurf flags or
	 * we might race a XINVLTLB before we manage to send the ipi's for
	 * the bits we set.
	 *
	 * NOTE: We are not signalling ourselves, mask already does NOT
	 *	 include our own cpu.
	 */
	smp_smurf_fetchset(&mask);

	/*
	 * Issue the IPI.  Note that the XINVLTLB IPI runs regardless of
	 * the critical section count on the target cpus.
	 */
	CPUMASK_ORMASK(mask, md->mi.gd_cpumask);
	if (all_but_self_ipi_enable &&
	    (all_but_self_ipi_enable >= 2 ||
	     CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		CPUMASK_NANDMASK(mask, md->mi.gd_cpumask);
		selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}

	/*
	 * Wait for acknowledgement by all cpus.  smp_inval_intr() will
	 * temporarily enable interrupts to avoid deadlocking the lapic,
	 * and will also handle running cpu_invltlb() and remote invlpg
	 * commands on our cpu if some other cpu requests it of us.
	 *
	 * WARNING! I originally tried to implement this as a hard loop
	 *	    checking only smp_invltlb_mask (and issuing a local
	 *	    cpu_invltlb() if requested), with interrupts enabled
	 *	    and without calling smp_inval_intr().  This DID NOT WORK.
	 *	    It resulted in weird races where smurf bits would get
	 *	    cleared without any action being taken.
	 */
	CPUMASK_ASSZERO(mask);
	while (CPUMASK_CMPMASKNEQ(smp_invltlb_mask, mask)) {
		smp_inval_intr();
		cpu_pause();
		if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
			/*
			 * cpuid - cpu doing the waiting
			 * invltlb_mask - IPI in progress
			 */
			kprintf("smp_invltlb %2d: WARNING blocked %d sec: "
				"inv=%08jx smurf=%08jx "
				"idle=%08jx/%08jx\n",
				md->mi.gd_cpuid,
				repeats + 1,
				smp_invltlb_mask.ary[0],
				smp_smurf_mask.ary[0],
				smp_idleinvl_mask.ary[0],
				smp_idleinvl_reqs.ary[0]);
			mdcpu->gd_xinvaltlb = 0;
			ATOMIC_CPUMASK_NANDMASK(smp_smurf_mask,
						smp_invltlb_mask);
			smp_invlpg(&smp_active_mask);

			/*
			 * Reload tsc_base for retry, give up after
			 * 10 seconds (60 seconds if in VM).
			 */
			tsc_base = rdtsc();
			if (++repeats > invlpg_timeout) {
				kprintf("smp_invltlb: giving up\n");
				CPUMASK_ASSZERO(smp_invltlb_mask);
			}
		}
	}
	write_rflags(rflags);
	crit_exit_gd(&md->mi);
}

/*
 * Called from a critical section with interrupts hard-disabled.
 * This function issues an XINVLTLB IPI and then executes any pending
 * command on the current cpu before returning.
 */
void
smp_invlpg(cpumask_t *cmdmask)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t mask;

	if (report_invlpg_src > 0) {
		if (--report_invlpg_src <= 0)
			print_backtrace(8);
	}

	/*
	 * Disallow normal interrupts, set all active cpus in the pmap,
	 * plus our own for completion processing (it might or might not
	 * be part of the set).
	 */
	mask = smp_active_mask;
	CPUMASK_ANDMASK(mask, *cmdmask);
	CPUMASK_ORMASK(mask, md->mi.gd_cpumask);

	/*
	 * Avoid double-queuing IPIs, which can deadlock us.  We must disable
	 * real interrupts when setting the smurf flags or we might race a
	 * XINVLTLB before we manage to send the ipi's for the bits we set.
	 *
	 * NOTE: We might be including our own cpu in the smurf mask.
	 */
	smp_smurf_fetchset(&mask);

	/*
	 * Issue the IPI.  Note that the XINVLTLB IPI runs regardless of
	 * the critical section count on the target cpus.
	 *
	 * We do not include our own cpu when issuing the IPI.
	 */
	if (all_but_self_ipi_enable &&
	    (all_but_self_ipi_enable >= 2 ||
	     CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		CPUMASK_NANDMASK(mask, md->mi.gd_cpumask);
		selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}

	/*
	 * This will synchronously wait for our command to complete,
	 * as well as process commands from other cpus.  It also handles
	 * reentrancy.
	 *
	 * (interrupts are disabled and we are in a critical section here)
	 */
	smp_inval_intr();
}

/*
 * Issue rip/rsp sniffs
 */
void
smp_sniff(void)
{
	globaldata_t gd = mycpu;
	register_t rflags;
	int dummy;

	/*
	 * Ignore all_but_self_ipi_enable here and just use it.
	 */
	rflags = read_rflags();
	cpu_disable_intr();
	all_but_self_ipi(XSNIFF_OFFSET);
	gd->gd_sample_pc = smp_sniff;
	gd->gd_sample_sp = &dummy;
	write_rflags(rflags);
}

void
cpu_sniff(int dcpu)
{
	globaldata_t rgd = globaldata_find(dcpu);
	register_t rflags;
	int dummy;

	/*
	 * Ignore all_but_self_ipi_enable here and just use it.
	 */
	rflags = read_rflags();
	cpu_disable_intr();
	single_apic_ipi(dcpu, XSNIFF_OFFSET, APIC_DELMODE_FIXED);
	rgd->gd_sample_pc = cpu_sniff;
	rgd->gd_sample_sp = &dummy;
	write_rflags(rflags);
}

/*
 * Called from Xinvltlb assembly with interrupts hard-disabled and in a
 * critical section.  gd_intr_nesting_level may or may not be bumped
 * depending on entry.
 *
 * THIS CODE IS INTENDED TO EXPLICITLY IGNORE THE CRITICAL SECTION COUNT.
 * THAT IS, THE INTERRUPT IS INTENDED TO FUNCTION EVEN WHEN MAINLINE CODE
 * IS IN A CRITICAL SECTION.
 */
void
smp_inval_intr(void)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t cpumask;
	tsc_uclock_t tsc_base = rdtsc();

	/*
	 * The idle code is in a critical section, but that doesn't stop
	 * Xinvltlb from executing, so deal with the race which can occur
	 * in that situation.  Otherwise r-m-w operations by pmap_inval_intr()
	 * may have problems.
	 */
	if (ATOMIC_CPUMASK_TESTANDCLR(smp_idleinvl_reqs, md->mi.gd_cpuid)) {
		ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask, md->mi.gd_cpuid);
		cpu_invltlb();
		cpu_mfence();
	}

	/*
	 * This is a real mess.  I'd like to just leave interrupts disabled
	 * but it can cause the lapic to deadlock if too many interrupts queue
	 * to it, due to the idiotic design of the lapic.  So instead we have
	 * to enter a critical section so normal interrupts are made pending
	 * and track whether this one was reentered.
	 */
	if (md->gd_xinvaltlb) {		/* reentrant on cpu */
		md->gd_xinvaltlb = 2;
		return;
	}
	md->gd_xinvaltlb = 1;

	/*
	 * Check only those cpus with active Xinvl* commands pending.
	 *
	 * We are going to enable interrupts so make sure we are in a
	 * critical section.  This is necessary to avoid deadlocking
	 * the lapic and to ensure that we execute our commands prior to
	 * any nominal interrupt or preemption.
	 *
	 * WARNING! It is very important that we only clear our bit in
	 *	    smp_smurf_mask once for each interrupt we take.  In
	 *	    this case, we clear it on initial entry and only loop
	 *	    on the reentrancy detect (caused by another interrupt).
	 */
loop:
	cpumask = smp_invmask;
	ATOMIC_CPUMASK_ORBIT(smp_in_mask, md->mi.gd_cpuid);
	ATOMIC_CPUMASK_NANDBIT(smp_smurf_mask, md->mi.gd_cpuid);

	/*
	 * Specific page request(s), and we can't return until all bits
	 * are zero.
	 */
	for (;;) {
		int toolong;

		/*
		 * Also execute any pending full invalidation request in
		 * this loop.
		 */
		if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) {
			ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask,
					       md->mi.gd_cpuid);
			cpu_invltlb();
			cpu_mfence();
		}

		if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
			/*
			 * cpuid - cpu doing the waiting
			 * invmask - IPI in progress
			 * invltlb_mask - which ones are TLB invalidations?
			 */
			kprintf("smp_inval_intr %2d, WARNING blocked >1 sec "
				"inv=%08jx tlbm=%08jx smurf=%08jx "
				"idle=%08jx/%08jx\n",
				md->mi.gd_cpuid,
				smp_invmask.ary[0],
				smp_invltlb_mask.ary[0],
				smp_smurf_mask.ary[0],
				smp_idleinvl_mask.ary[0],
				smp_idleinvl_reqs.ary[0]);
			tsc_base = rdtsc();
			toolong = 1;
		} else {
			toolong = 0;
		}

		/*
		 * We can only add bits to the cpumask to test during the
		 * loop because the smp_invmask bit is cleared once the
		 * originator completes the command (the targets may still
		 * be cycling their own completions in this loop, afterwards).
		 *
		 * lfence required prior to all tests as this Xinvltlb
		 * interrupt could race the originator (already be in progress
		 * when the originator decides to issue, due to an issue by
		 * another cpu).
		 */
		cpu_lfence();
		CPUMASK_ORMASK(cpumask, smp_invmask);
		/*cpumask = smp_active_mask;*/	/* XXX */

		if (pmap_inval_intr(&cpumask, toolong) == 0) {
			/*
			 * Clear our smurf mask to allow new IPIs, but deal
			 * with potential races.
			 */
			break;
		}

		/*
		 * Test if someone sent us another invalidation IPI, break
		 * out so we can take it to avoid deadlocking the lapic
		 * interrupt queue (? stupid intel, amd).
		 */
		if (md->gd_xinvaltlb == 2)
			break;
		if (CPUMASK_TESTBIT(smp_smurf_mask, md->mi.gd_cpuid))
			break;
	}

	/*
	 * Full invalidation request
	 */
	if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) {
		ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask,
				       md->mi.gd_cpuid);
		cpu_invltlb();
		cpu_mfence();
	}

	/*
	 * Check to see if another Xinvltlb interrupt occurred and loop up
	 * if it did.
	 */
	cpu_lfence();
	if (md->gd_xinvaltlb == 2) {
		md->gd_xinvaltlb = 1;
		goto loop;
	}
	ATOMIC_CPUMASK_NANDBIT(smp_in_mask, md->mi.gd_cpuid);
	md->gd_xinvaltlb = 0;
}

/* flush this cpu's caches; run on each cpu via the cpusync framework */
void
cpu_wbinvd_on_all_cpus_callback(void *arg)
{
	wbinvd();
}

/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *	      from executing at same time.
 */
int
stop_cpus(cpumask_t map)
{
	cpumask_t mask;

	CPUMASK_ANDMASK(map, smp_active_mask);

	/* send the Xcpustop IPI to all CPUs in map */
	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);

	do {
		mask = stopped_cpus;
		CPUMASK_ANDMASK(mask, map);
		cpu_pause();
	} while (CPUMASK_CMPMASKNEQ(mask, map));

	return 1;
}

/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 */
int
restart_cpus(cpumask_t map)
{
	cpumask_t mask;

	/* signal other cpus to restart */
	mask = map;
	CPUMASK_ANDMASK(mask, smp_active_mask);
	cpu_ccfence();
	started_cpus = mask;
	cpu_ccfence();

	/* wait for each to clear its bit */
	while (CPUMASK_CMPMASKNEQ(stopped_cpus, map))
		cpu_pause();

	return 1;
}
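
/*
 * Usage sketch (illustrative): a debugger or panic path typically pairs
 * these as
 *
 *	stop_cpus(mycpu->gd_other_cpus);
 *	... inspect state while the other cpus spin ...
 *	restart_cpus(stopped_cpus);
 *
 * per the "usually called with" notes above.
 */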

/*
 * This is called once the mpboot code has gotten us properly relocated
 * and the MMU turned on, etc.   ap_init() is actually the idle thread,
 * and when it returns the scheduler will call the real cpu_idle() main
 * loop for the idlethread.  Interrupts are disabled on entry and should
 * remain disabled at return.
 */
void
ap_init(void)
{
	int	cpu_id;

	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
	cpu_mfence();

	/*
	 * Interlock for LAPIC initialization.  Wait until mp_finish_lapic is
	 * non-zero, then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish_lapic, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish_lapic == 0) {
		cpu_pause();
		cpu_lfence();
	}
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	if (cpu_feature & CPUID_TSC) {
		/*
		 * The BSP is constantly updating tsc0_offset, figure out
		 * the relative difference to synchronize ktrdump.
		 */
		tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask;
	ATOMIC_CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/* A quick check from sanity claus */
	cpu_id = APICID_TO_CPUID(LAPIC_READID);
	if (mycpu->gd_cpuid != cpu_id) {
		kprintf("SMP: assigned cpuid = %d\n", mycpu->gd_cpuid);
		kprintf("SMP: actual cpuid = %d lapicid %d\n",
			cpu_id, LAPIC_READID);
		kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize AP's local APIC for irq's */
	lapic_init(FALSE);

	/* LAPIC initialization is done */
	ATOMIC_CPUMASK_ORBIT(smp_lapic_mask, mycpu->gd_cpuid);
	cpu_mfence();

	/* Let BSP move onto the next initialization stage */
	rel_mplock();

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up this is a special case
	 * because we returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done put it there.
	 */

	/*
	 * Hold a critical section and allow real interrupts to occur.  Zero
	 * any spurious interrupts which have accumulated, then set our
	 * smp_active_mask indicating that we are fully operational.
	 */
	__asm __volatile("sti; pause; pause"::);
	bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending));
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

	/*
	 * Wait until all cpus have set their smp_active_mask and have fully
	 * operational interrupts before proceeding.
	 *
	 * We need a final cpu_invltlb() because we would not have received
	 * any until we set our bit in smp_active_mask.
	 */
	while (mp_finish == 1) {
		cpu_pause();
		cpu_lfence();
	}
	cpu_invltlb();

	/*
	 * Initialize per-cpu clocks and do other per-cpu initialization.
	 * At this point code is expected to be able to use the full kernel
	 * API.
	 */
	initclocks_pcpu();	/* clock interrupts (via IPIs) */

	/*
	 * Since we may have cleaned up the interrupt triggers, manually
	 * process any pending IPIs before exiting our critical section.
	 * Once the critical section has exited, normal interrupt processing
	 * may occur.
	 */
	atomic_swap_int(&mycpu->gd_npoll, 0);
	lwkt_process_ipiq();

	/*
	 * Final final, allow the waiting BSP to resume the boot process,
	 * return 'into' the idle thread bootstrap.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_finalize_mask, mycpu->gd_cpuid);
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

/*
 * Get SMP fully working before we start initializing devices.
 */
static void
ap_finish(void)
{
	kprintf("Finish MP startup\n");
	rel_mplock();

	/*
	 * Wait for the active mask to complete, after which all cpus will
	 * be accepting interrupts.
	 */
	mp_finish = 1;
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	/*
	 * Wait for the finalization mask to complete, after which all cpus
	 * have completely finished initializing and are entering or are in
	 * their idle thread.
	 *
	 * BSP should have received all required invltlbs but do another
	 * one just in case.
	 */
	cpu_invltlb();
	mp_finish = 2;
	while (CPUMASK_CMPMASKNEQ(smp_finalize_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	kprintf("Active CPU Mask: %016jx\n",
		(uintmax_t)CPUMASK_LOWMASK(smp_active_mask));
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);

/*
 * Interrupts must be hard-disabled by caller
 */
void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu))
		single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
}

#if 0	/* single_apic_ipi_passive() not working yet */
/*
 * Returns 0 on failure, 1 on success
 */
int
cpu_send_ipiq_passive(int dcpu)
{
	int r = 0;

	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
					    APIC_DELMODE_FIXED);
	}
	return(r);
}
#endif

static void
mp_bsp_simple_setup(void)
{
	struct mdglobaldata *gd;
	size_t ipiq_size;

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	gd = (struct mdglobaldata *)mycpu;
	gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(kernel_map, ipiq_size,
					    VM_SUBSYS_IPIQ);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random. */
	arc4_init_pcpu(0);

	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
}

/*
 * CPU TOPOLOGY DETECTION FUNCTIONS
 */

/* Detect intel topology using CPUID
 * Ref: http://www.intel.com/Assets/PDF/appnote/241618.pdf, pg 41
 */
static void
detect_intel_topology(int count_htt_cores)
{
	int shift = 0;
	int ecx_index = 0;
	int core_plus_logical_bits = 0;
	int cores_per_package;
	int logical_per_package;
	int logical_per_core;
	unsigned int p[4];

	if (cpu_high >= 0xb) {
		goto FUNC_B;
	} else if (cpu_high >= 0x4) {
		goto FUNC_4;
	} else {
		core_bits = 0;
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		logical_CPU_bits = 1 << shift;
		return;
	}

FUNC_B:
	cpuid_count(0xb, FUNC_B_THREAD_LEVEL, p);

	/* if 0xb not supported - fallback to 0x4 */
	if (p[1] == 0 || (FUNC_B_TYPE(p[2]) != FUNC_B_THREAD_TYPE)) {
		goto FUNC_4;
	}

	logical_CPU_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);

	ecx_index = FUNC_B_THREAD_LEVEL + 1;
	do {
		cpuid_count(0xb, ecx_index, p);

		/* Check for the Core type in the implemented sub leaves. */
		if (FUNC_B_TYPE(p[2]) == FUNC_B_CORE_TYPE) {
			core_plus_logical_bits =
			    FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);
			break;
		}
		++ecx_index;
	} while (FUNC_B_TYPE(p[2]) != FUNC_B_INVALID_TYPE);

	core_bits = core_plus_logical_bits - logical_CPU_bits;
	return;

FUNC_4:
	cpuid_count(0x4, 0, p);
	cores_per_package = FUNC_4_MAX_CORE_NO(p[0]) + 1;

	logical_per_package = count_htt_cores;
	logical_per_core = logical_per_package / cores_per_package;

	for (shift = 0; (1 << shift) < logical_per_core; ++shift)
		;
	logical_CPU_bits = shift;

	for (shift = 0; (1 << shift) < cores_per_package; ++shift)
		;
	core_bits = shift;
}

/* Detect AMD topology using CPUID
 * Ref: http://support.amd.com/us/Embedded_TechDocs/25481.pdf, last page
 */
static void
detect_amd_topology(int count_htt_cores)
{
	int shift = 0;

	if ((cpu_feature & CPUID_HTT) && (amd_feature2 & AMDID2_CMP)) {
		if (cpu_procinfo2 & AMDID_COREID_SIZE) {
			core_bits = (cpu_procinfo2 & AMDID_COREID_SIZE) >>
				    AMDID_COREID_SIZE_SHIFT;
		} else {
			core_bits = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
			for (shift = 0; (1 << shift) < core_bits; ++shift)
				;
			core_bits = shift;
		}
		logical_CPU_bits = count_htt_cores >> core_bits;
		for (shift = 0; (1 << shift) < logical_CPU_bits; ++shift)
			;
		logical_CPU_bits = shift;

		kprintf("core_bits %d logical_CPU_bits %d\n",
			core_bits - logical_CPU_bits, logical_CPU_bits);

		if (amd_feature2 & AMDID2_TOPOEXT) {
			u_int p[4];	/* eax,ebx,ecx,edx */
			int nodes;

			cpuid_count(0x8000001e, 0, p);

			switch(((p[1] >> 8) & 3) + 1) {
			case 1:
				logical_CPU_bits = 0;
				break;
			case 2:
				logical_CPU_bits = 1;
				break;
			case 3:
			case 4:
				logical_CPU_bits = 2;
				break;
			}

			/*
			 * Nodes are kind of a stand-in for packages*sockets,
			 * but can be thought of in terms of Numa domains.
			 */
			nodes = ((p[2] >> 8) & 7) + 1;
			core_bits -= logical_CPU_bits;
			kprintf("%d-way htt, %d Nodes, %d cores/node\n",
				(int)(((p[1] >> 8) & 3) + 1),
				nodes, 1 << core_bits);
		}
	} else if (amd_feature2 & AMDID2_TOPOEXT) {
		u_int p[4];	/* eax,ebx,ecx,edx */
		int type;
		int level;
		int share_count;
		int i;

		logical_CPU_bits = 0;
		core_bits = 0;

		for (i = 0; i < 256; ++i) {
			cpuid_count(0x8000001d, i, p);
			type = p[0] & 0x1f;
			level = (p[0] >> 5) & 0x7;
			share_count = 1 + ((p[0] >> 14) & 0xfff);

			if (type == 0)
				break;
			kprintf("Topology probe i=%2d type=%d "
				"level=%d share_count=%d\n",
				i, type, level, share_count);
			shift = 0;
			while ((1 << shift) < share_count)
				++shift;

			switch(type) {
			case 1:
				/*
				 * Logical cpus sharing the lowest level
				 * (SMT threads).
				 */
				logical_CPU_bits = shift;
				break;
			case 2:
				/*
				 * Physical subdivision of a package
				 */
				core_bits = logical_CPU_bits +
					    shift;
				break;
			case 3:
				/*
				 * CPU L1/L2/L3 cache
				 */
				break;
			case 4:
				/*
				 * Package aka chip, equivalent to
				 * socket
				 */
				break;
			}
		}
	} else {
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		core_bits = shift;
		logical_CPU_bits = 0;
	}
}

static void
amd_get_compute_unit_id(void *arg)
{
	u_int regs[4];

	do_cpuid(0x8000001e, regs);
	cpu_node_t *mynode = get_cpu_node_by_cpuid(mycpuid);

	/*
	 * AMD - CPUID Specification September 2010
	 * page 34 - //ComputeUnitID = ebx[0:7]//
	 */
	mynode->compute_unit_id = regs[1] & 0xff;
}

int
fix_amd_topology(void)
{
	cpumask_t mask;
	int i;

	if (cpu_vendor_id != CPU_VENDOR_AMD)
		return -1;
	if ((amd_feature2 & AMDID2_TOPOEXT) == 0)
		return -1;

	CPUMASK_ASSALLONES(mask);
	lwkt_cpusync_simple(mask, amd_get_compute_unit_id, NULL);

	kprintf("Compute unit IDs:\n");
	for (i = 0; i < ncpus; i++) {
		kprintf("%d-%d; \n",
			i, get_cpu_node_by_cpuid(i)->compute_unit_id);
	}
	return 0;
}

/* Calculate
 * - logical_CPU_bits
 * - core_bits
 *
 * With the values above (for AMD or INTEL) we are able to generally
 * detect the CPU topology (number of cores for each level):
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 * Ref: http://www.multicoreinfo.com/research/papers/whitepapers/Intel-detect-topology.pdf
 */
void
detect_cpu_topology(void)
{
	static int topology_detected = 0;
	int count = 0;

	if (topology_detected)
		goto OUT;
	if ((cpu_feature & CPUID_HTT) == 0) {
		core_bits = 0;
		logical_CPU_bits = 0;
		goto OUT;
	}
	count = (cpu_procinfo & CPUID_HTT_CORES) >> CPUID_HTT_CORE_SHIFT;

	if (cpu_vendor_id == CPU_VENDOR_INTEL)
		detect_intel_topology(count);
	else if (cpu_vendor_id == CPU_VENDOR_AMD)
		detect_amd_topology(count);
	topology_detected = 1;

OUT:
	kprintf("Bits within APICID: logical_CPU_bits: %d; "
		"core_bits: %d\n",
		logical_CPU_bits, core_bits);
}

/*
 * Interface functions to calculate chip_ID,
 * core_number and logical_number
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 */
int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_chip_ID_from_APICID(int apicid)
{
	return apicid >> (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return ((get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
		((1 << core_bits) - 1));
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) &
		((1 << logical_CPU_bits) - 1));
}
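
/*
 * Worked example (illustrative): with logical_CPU_bits == 1 and
 * core_bits == 3, APIC ID 0b01011 (11) decodes as
 *
 *	logical (SMT) number = 11 & 0x1        = 1
 *	core number          = (11 >> 1) & 0x7 = 5
 *	chip (package) ID    = 11 >> 4         = 0
 *
 * matching the shift/mask arithmetic in the functions above.
 */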