/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * Copyright 2018 Joyent, Inc.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
 */
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/cpu.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/note.h>
#include <sys/asm_linkage.h>
#include <sys/x_call.h>
#include <sys/systm.h>
#include <sys/var.h>
#include <sys/vtrace.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <sys/segments.h>
#include <sys/kmem.h>
#include <sys/stack.h>
#include <sys/smp_impldefs.h>
#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/traptrace.h>
#include <sys/clock.h>
#include <sys/cpc_impl.h>
#include <sys/pg.h>
#include <sys/cmt.h>
#include <sys/dtrace.h>
#include <sys/archsystm.h>
#include <sys/fp.h>
#include <sys/reboot.h>
#include <sys/kdi_machimpl.h>
#include <vm/hat_i86.h>
#include <vm/vm_dep.h>
#include <sys/memnode.h>
#include <sys/pci_cfgspace.h>
#include <sys/mach_mmu.h>
#include <sys/sysmacros.h>
#if defined(__xpv)
#include <sys/hypervisor.h>
#endif
#include <sys/cpu_module.h>
#include <sys/ontrap.h>
struct cpu	cpus[1] __aligned(MMU_PAGESIZE);
struct cpu	*cpu[NCPU] = {&cpus[0]};
struct cpu	*cpu_free_list;
cpu_core_t	cpu_core[NCPU];

#define	cpu_next_free	cpu_prev
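
/*
 * A released cpu structure is kept on cpu_free_list, chained through its
 * cpu_prev field; the alias above documents that reuse. Borrowing the link
 * is safe because a freed cpu is no longer on the active CPU list.
 */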
/*
 * Useful for disabling MP bring-up on a MP capable system.
 */
int use_mp = 1;

/*
 * to be set by a PSM to indicate what cpus
 * are sitting around on the system.
 */
cpuset_t mp_cpus;

/*
 * This variable is used by the hat layer to decide whether or not
 * critical sections are needed to prevent race conditions. For sun4m,
 * this variable is set once enough MP initialization has been done in
 * order to allow cross calls.
 */
int flushes_require_xcalls;

cpuset_t cpu_ready_set;		/* initialized in startup() */

static void mp_startup_boot(void);
static void mp_startup_hotplug(void);

static void cpu_sep_enable(void);
static void cpu_sep_disable(void);
static void cpu_asysc_enable(void);
static void cpu_asysc_disable(void);
/*
 * Init CPU info - get CPU type info for processor_info system call.
 */
void
init_cpu_info(struct cpu *cp)
{
	processor_info_t *pi = &cp->cpu_type_info;

	/*
	 * Get clock-frequency property for the CPU.
	 */
	pi->pi_clock = cpu_freq;

	/*
	 * Current frequency in Hz.
	 */
	cp->cpu_curr_clock = cpu_freq_hz;

	/*
	 * Supported frequencies.
	 */
	if (cp->cpu_supp_freqs == NULL) {
		cpu_set_supp_freqs(cp, NULL);
	}

	(void) strcpy(pi->pi_processor_type, "i386");
	if (fpu_exists)
		(void) strcpy(pi->pi_fputypes, "i387 compatible");

	cp->cpu_idstr = kmem_zalloc(CPU_IDSTRLEN, KM_SLEEP);
	cp->cpu_brandstr = kmem_zalloc(CPU_IDSTRLEN, KM_SLEEP);

	/*
	 * If called for the BSP, cp is equal to current CPU.
	 * For non-BSPs, cpuid info of cp is not ready yet, so use cpuid info
	 * of current CPU as default values for cpu_idstr and cpu_brandstr.
	 * They will be corrected in mp_startup_common() after cpuid_pass1()
	 * has been invoked on target CPU.
	 */
	(void) cpuid_getidstr(CPU, cp->cpu_idstr, CPU_IDSTRLEN);
	(void) cpuid_getbrandstr(CPU, cp->cpu_brandstr, CPU_IDSTRLEN);
}
/*
 * Configure syscall support on this CPU.
 */
/*ARGSUSED*/
void
init_cpu_syscall(struct cpu *cp)
{
	kpreempt_disable();

	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_ASYSC)) {
		uint64_t flags;

#if !defined(__xpv)
		/*
		 * The syscall instruction imposes a certain ordering on
		 * segment selectors, so we double-check that ordering
		 * here.
		 */
		CTASSERT(KDS_SEL == KCS_SEL + 8);
		CTASSERT(UDS_SEL == U32CS_SEL + 8);
		CTASSERT(UCS_SEL == U32CS_SEL + 16);
#endif
		/*
		 * Turn syscall/sysret extensions on.
		 */
		cpu_asysc_enable();

		/*
		 * Program the magic registers ..
		 */
		wrmsr(MSR_AMD_STAR,
		    ((uint64_t)(U32CS_SEL << 16 | KCS_SEL)) << 32);
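
		/*
		 * With KPTI enabled, the syscall entry points must be the
		 * trampolines, which stay mapped in the user page tables
		 * and switch to the kernel %cr3 before reaching the real
		 * handlers.
		 */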
		if (kpti_enable == 1) {
			wrmsr(MSR_AMD_LSTAR,
			    (uint64_t)(uintptr_t)tr_sys_syscall);
			wrmsr(MSR_AMD_CSTAR,
			    (uint64_t)(uintptr_t)tr_sys_syscall32);
		} else {
			wrmsr(MSR_AMD_LSTAR,
			    (uint64_t)(uintptr_t)sys_syscall);
			wrmsr(MSR_AMD_CSTAR,
			    (uint64_t)(uintptr_t)sys_syscall32);
		}

		/*
		 * This list of flags is masked off the incoming
		 * %rfl when we enter the kernel.
		 */
		flags = PS_IE | PS_T;
		if (is_x86_feature(x86_featureset, X86FSET_SMAP) == B_TRUE)
			flags |= PS_ACHK;
		wrmsr(MSR_AMD_SFMASK, flags);
	}
	/*
	 * On 64-bit kernels on Nocona machines, the 32-bit syscall
	 * variant isn't available to 32-bit applications, but sysenter is.
	 */
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_SEP)) {

#if !defined(__xpv)
		/*
		 * The sysenter instruction imposes a certain ordering on
		 * segment selectors, so we double-check that ordering
		 * here. See "sysenter" in Intel document 245471-012, "IA-32
		 * Intel Architecture Software Developer's Manual Volume 2:
		 * Instruction Set Reference"
		 */
		CTASSERT(KDS_SEL == KCS_SEL + 8);

		CTASSERT(U32CS_SEL == ((KCS_SEL + 16) | 3));
		CTASSERT(UDS_SEL == U32CS_SEL + 8);
#endif

		cpu_sep_enable();

		/*
		 * resume() sets this value to the base of the thread's stack
		 * via a context handler.
		 */
		wrmsr(MSR_INTC_SEP_ESP, 0);

		if (kpti_enable == 1) {
			wrmsr(MSR_INTC_SEP_EIP,
			    (uint64_t)(uintptr_t)tr_sys_sysenter);
		} else {
			wrmsr(MSR_INTC_SEP_EIP,
			    (uint64_t)(uintptr_t)sys_sysenter);
		}
	}

	kpreempt_enable();
}
#if !defined(__xpv)
/*
 * Configure per-cpu ID GDT
 */
static void
init_cpu_id_gdt(struct cpu *cp)
{
	/* Write cpu_id into limit field of GDT for usermode retrieval */
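	/*
	 * Userland can read the id back cheaply with the lsl instruction
	 * against the GDT_CPUID selector, since lsl returns the descriptor
	 * limit without a kernel crossing.
	 */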
#if defined(__amd64)
	set_usegd(&cp->cpu_gdt[GDT_CPUID], SDP_SHORT, NULL, cp->cpu_id,
	    SDT_MEMRODA, SEL_UPL, SDP_BYTES, SDP_OP32);
#elif defined(__i386)
	set_usegd(&cp->cpu_gdt[GDT_CPUID], NULL, cp->cpu_id, SDT_MEMRODA,
	    SEL_UPL, SDP_BYTES, SDP_OP32);
#endif
}
#endif /* !defined(__xpv) */
/*
 * Multiprocessor initialization.
 *
 * Allocate and initialize the cpu structure, TRAPTRACE buffer, and the
 * startup and idle threads for the specified CPU.
 * Parameter boot is true for boot time operations and is false for CPU
 * DR operations.
 */
static struct cpu *
mp_cpu_configure_common(int cpun, boolean_t boot)
{
	struct cpu *cp;
	kthread_id_t tp;
	caddr_t	sp;
	proc_t *procp;
#if !defined(__xpv)
	extern int idle_cpu_prefer_mwait;
	extern void cpu_idle_mwait();
#endif
	extern void idle();
	extern void cpu_idle();

#ifdef TRAPTRACE
	trap_trace_ctl_t *ttc = &trap_trace_ctl[cpun];
#endif

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpun < NCPU && cpu[cpun] == NULL);

	if (cpu_free_list == NULL) {
		cp = kmem_zalloc(sizeof (*cp), KM_SLEEP);
	} else {
		cp = cpu_free_list;
		cpu_free_list = cp->cpu_next_free;
	}

	cp->cpu_m.mcpu_istamp = cpun << 16;

	/* Create per CPU specific threads in the process p0. */
	procp = &p0;

	/*
	 * Initialize the dispatcher first.
	 */
	disp_cpu_init(cp);

	cpu_vm_data_init(cp);

	/*
	 * Allocate and initialize the startup thread for this CPU.
	 * Interrupt and process switch stacks get allocated later
	 * when the CPU starts running.
	 */
	tp = thread_create(NULL, 0, NULL, NULL, 0, procp,
	    TS_STOPPED, maxclsyspri);

	/*
	 * Set state to TS_ONPROC since this thread will start running
	 * as soon as the CPU comes online.
	 *
	 * All the other fields of the thread structure are setup by
	 * thread_create().
	 */
	THREAD_ONPROC(tp, cp);
	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	/*
	 * Setup thread to start in mp_startup_common.
	 */
	sp = tp->t_stk;
	tp->t_sp = (uintptr_t)(sp - MINFRAME);
#if defined(__amd64)
	tp->t_sp -= STACK_ENTRY_ALIGN;		/* fake a call */
#endif
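	/*
	 * The STACK_ENTRY_ALIGN adjustment above leaves t_sp looking as if
	 * mp_startup_* had been reached by a call instruction, so the stack
	 * is aligned the way the amd64 ABI expects on function entry.
	 */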
	/*
	 * Setup thread start entry point for boot or hotplug.
	 */
	if (boot) {
		tp->t_pc = (uintptr_t)mp_startup_boot;
	} else {
		tp->t_pc = (uintptr_t)mp_startup_hotplug;
	}

	cp->cpu_id = cpun;
	cp->cpu_self = cp;
	cp->cpu_thread = tp;
	cp->cpu_lwp = NULL;
	cp->cpu_dispthread = tp;
	cp->cpu_dispatch_pri = DISP_PRIO(tp);

	/*
	 * cpu_base_spl must be set explicitly here to prevent any blocking
	 * operations in mp_startup_common from causing the spl of the cpu
	 * to drop to 0 (allowing device interrupts before we're ready) in
	 * resume().
	 * cpu_base_spl MUST remain at LOCK_LEVEL until the cpu is CPU_READY.
	 * As an extra bit of security on DEBUG kernels, this is enforced with
	 * an assertion in mp_startup_common() -- before cpu_base_spl is set
	 * to its proper value.
	 */
	cp->cpu_base_spl = ipltospl(LOCK_LEVEL);
	/*
	 * Now, initialize per-CPU idle thread for this CPU.
	 */
	tp = thread_create(NULL, PAGESIZE, idle, NULL, 0, procp, TS_ONPROC, -1);

	cp->cpu_idle_thread = tp;

	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	/*
	 * Bootstrap the CPU's PG data
	 */
	pg_cpu_bootstrap(cp);

	/*
	 * Perform CPC initialization on the new CPU.
	 */
	kcpc_hw_init(cp);

	/*
	 * Allocate virtual addresses for cpu_caddr1 and cpu_caddr2
	 * for each CPU.
	 */
	setup_vaddr_for_ppcopy(cp);

	/*
	 * Allocate page for new GDT and initialize from current GDT.
	 */
#if !defined(__lint)
	ASSERT((sizeof (*cp->cpu_gdt) * NGDT) <= PAGESIZE);
#endif
	cp->cpu_gdt = kmem_zalloc(PAGESIZE, KM_SLEEP);
	bcopy(CPU->cpu_gdt, cp->cpu_gdt, (sizeof (*cp->cpu_gdt) * NGDT));

#if defined(__i386)
	/*
	 * setup kernel %gs.
	 */
	set_usegd(&cp->cpu_gdt[GDT_GS], cp, sizeof (struct cpu) -1, SDT_MEMRWA,
	    SEL_KPL, 0, 1);
#endif

	/*
	 * Allocate pages for the CPU LDT.
	 */
	cp->cpu_m.mcpu_ldt = kmem_zalloc(LDT_CPU_SIZE, KM_SLEEP);
	cp->cpu_m.mcpu_ldt_len = 0;

	/*
	 * Allocate a per-CPU IDT and initialize the new IDT to the currently
	 * running CPU.
	 */
#if !defined(__lint)
	ASSERT((sizeof (*CPU->cpu_idt) * NIDT) <= PAGESIZE);
#endif
	cp->cpu_idt = kmem_alloc(PAGESIZE, KM_SLEEP);
	bcopy(CPU->cpu_idt, cp->cpu_idt, PAGESIZE);

	/*
	 * alloc space for cpuid info
	 */
	cpuid_alloc_space(cp);
#if !defined(__xpv)
	if (is_x86_feature(x86_featureset, X86FSET_MWAIT) &&
	    idle_cpu_prefer_mwait) {
		cp->cpu_m.mcpu_mwait = cpuid_mwait_alloc(cp);
		cp->cpu_m.mcpu_idle_cpu = cpu_idle_mwait;
	} else
#endif
		cp->cpu_m.mcpu_idle_cpu = cpu_idle;

	init_cpu_info(cp);

#if !defined(__xpv)
	init_cpu_id_gdt(cp);
#endif

	/*
	 * alloc space for ucode_info
	 */
	ucode_alloc_space(cp);
	xc_init_cpu(cp);
	hat_cpu_online(cp);

#ifdef TRAPTRACE
	/*
	 * If this is a TRAPTRACE kernel, allocate TRAPTRACE buffers
	 */
	ttc->ttc_first = (uintptr_t)kmem_zalloc(trap_trace_bufsize, KM_SLEEP);
	ttc->ttc_next = ttc->ttc_first;
	ttc->ttc_limit = ttc->ttc_first + trap_trace_bufsize;
#endif

	/*
	 * Record that we have another CPU.
	 */
	/*
	 * Initialize the interrupt threads for this CPU
	 */
	cpu_intr_alloc(cp, NINTR_THREADS);
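
	/*
	 * The new CPU starts life powered off, quiesced and offline; these
	 * flags are peeled away in mp_startup_common() as the CPU is
	 * actually brought up.
	 */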
	cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;
	cpu_set_state(cp);

	/*
	 * Add CPU to list of available CPUs. It'll be on the active list
	 * after mp_startup_common().
	 */
	cpu_add_unit(cp);

	return (cp);
}
/*
 * Undo what was done in mp_cpu_configure_common
 */
static void
mp_cpu_unconfigure_common(struct cpu *cp, int error)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Remove the CPU from the list of available CPUs.
	 */
	cpu_del_unit(cp->cpu_id);

	if (error == ETIMEDOUT) {
		/*
		 * The cpu was started, but never *seemed* to run any
		 * code in the kernel; it's probably off spinning in its
		 * own private world, though with potential references to
		 * our kmem-allocated IDTs and GDTs (for example).
		 *
		 * Worse still, it may actually wake up some time later,
		 * so rather than guess what it might or might not do, we
		 * leave the fundamental data structures intact.
		 */
		cp->cpu_flags = 0;
		return;
	}

	/*
	 * At this point, the only threads bound to this CPU should be
	 * special per-cpu threads: its idle thread, its pause threads,
	 * and its interrupt threads. Clean these up.
	 */
	cpu_destroy_bound_threads(cp);
	cp->cpu_idle_thread = NULL;

	/*
	 * Free the interrupt stack.
	 */
	segkp_release(segkp,
	    cp->cpu_intr_stack - (INTR_STACK_SIZE - SA(MINFRAME)));
	cp->cpu_intr_stack = NULL;

#ifdef TRAPTRACE
	/*
	 * Discard the trap trace buffer
	 */
	{
		trap_trace_ctl_t *ttc = &trap_trace_ctl[cp->cpu_id];

		kmem_free((void *)ttc->ttc_first, trap_trace_bufsize);
		ttc->ttc_first = NULL;
	}
#endif

	hat_cpu_offline(cp);

	ucode_free_space(cp);

	/* Free CPU ID string and brand string. */
	if (cp->cpu_idstr) {
		kmem_free(cp->cpu_idstr, CPU_IDSTRLEN);
		cp->cpu_idstr = NULL;
	}
	if (cp->cpu_brandstr) {
		kmem_free(cp->cpu_brandstr, CPU_IDSTRLEN);
		cp->cpu_brandstr = NULL;
	}

#if !defined(__xpv)
	if (cp->cpu_m.mcpu_mwait != NULL) {
		cpuid_mwait_free(cp);
		cp->cpu_m.mcpu_mwait = NULL;
	}
#endif
	cpuid_free_space(cp);

	if (cp->cpu_idt != CPU->cpu_idt)
		kmem_free(cp->cpu_idt, PAGESIZE);
	cp->cpu_idt = NULL;

	kmem_free(cp->cpu_m.mcpu_ldt, LDT_CPU_SIZE);
	cp->cpu_m.mcpu_ldt = NULL;
	cp->cpu_m.mcpu_ldt_len = 0;

	kmem_free(cp->cpu_gdt, PAGESIZE);
	cp->cpu_gdt = NULL;

	if (cp->cpu_supp_freqs != NULL) {
		size_t len = strlen(cp->cpu_supp_freqs) + 1;
		kmem_free(cp->cpu_supp_freqs, len);
		cp->cpu_supp_freqs = NULL;
	}

	teardown_vaddr_for_ppcopy(cp);

	kcpc_hw_fini(cp);

	cp->cpu_dispthread = NULL;
	cp->cpu_thread = NULL;	/* discarded by cpu_destroy_bound_threads() */

	cpu_vm_data_destroy(cp);

	xc_fini_cpu(cp);
	disp_cpu_fini(cp);

	ASSERT(cp != CPU0);
	bzero(cp, sizeof (*cp));
	cp->cpu_next_free = cpu_free_list;
	cpu_free_list = cp;
}
/*
 * Apply workarounds for known errata, and warn about those that are absent.
 *
 * System vendors occasionally create configurations which contain different
 * revisions of the CPUs that are almost but not exactly the same. At the
 * time of writing, this meant that their clock rates were the same, their
 * feature sets were the same, but the required workarounds were -not-
 * necessarily the same. So, this routine is invoked on -every- CPU soon
 * after starting to make sure that the resulting system contains the most
 * pessimal set of workarounds needed to cope with *any* of the CPUs in the
 * system.
 *
 * workaround_errata is invoked early in mlsetup() for CPU 0, and in
 * mp_startup_common() for all slave CPUs. Slaves process workaround_errata
 * prior to acknowledging their readiness to the master, so this routine will
 * never be executed by multiple CPUs in parallel, thus making updates to
 * global data safe.
 *
 * These workarounds are based on Rev 3.57 of the Revision Guide for
 * AMD Athlon(tm) 64 and AMD Opteron(tm) Processors, August 2005.
 */

#if defined(OPTERON_ERRATUM_88)
int opteron_erratum_88;		/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_91)
int opteron_erratum_91;		/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_93)
int opteron_erratum_93;		/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_95)
int opteron_erratum_95;		/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_100)
int opteron_erratum_100;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_108)
int opteron_erratum_108;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_109)
int opteron_erratum_109;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_121)
int opteron_erratum_121;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_122)
int opteron_erratum_122;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_123)
int opteron_erratum_123;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_131)
int opteron_erratum_131;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_WORKAROUND_6336786)
int opteron_workaround_6336786;		/* non-zero -> WA relevant and applied */
int opteron_workaround_6336786_UP = 0;	/* Not needed for UP */
#endif

#if defined(OPTERON_WORKAROUND_6323525)
int opteron_workaround_6323525;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_298)
int opteron_erratum_298;
#endif

#if defined(OPTERON_ERRATUM_721)
int opteron_erratum_721;
#endif
static void
workaround_warning(cpu_t *cp, uint_t erratum)
{
	cmn_err(CE_WARN, "cpu%d: no workaround for erratum %u",
	    cp->cpu_id, erratum);
}

static void
workaround_applied(uint_t erratum)
{
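	/*
	 * Values above 1000000 are internal bug ids (e.g. 6336786) rather
	 * than AMD erratum numbers, so the message wording differs.
	 */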
	if (erratum > 1000000)
		cmn_err(CE_CONT, "?workaround applied for cpu issue #%d\n",
		    erratum);
	else
		cmn_err(CE_CONT, "?workaround applied for cpu erratum #%d\n",
		    erratum);
}
static void
msr_warning(cpu_t *cp, const char *rw, uint_t msr, int error)
{
	cmn_err(CE_WARN, "cpu%d: couldn't %smsr 0x%x, error %d",
	    cp->cpu_id, rw, msr, error);
}
/*
 * Determine the number of nodes in a Hammer / Greyhound / Griffin family
 * system.
 */
static uint_t
opteron_get_nnodes(void)
{
	static uint_t nnodes = 0;

	if (nnodes == 0) {
#ifdef	DEBUG
		uint_t family;

		/*
		 * This routine uses a PCI config space based mechanism
		 * for retrieving the number of nodes in the system.
		 * Device 24, function 0, offset 0x60 as used here is not
		 * AMD processor architectural, and may not work on processor
		 * families other than those listed below.
		 *
		 * Callers of this routine must ensure that we're running on
		 * a processor which supports this mechanism.
		 * The assertion below is meant to catch calls on unsupported
		 * processors.
		 */
		family = cpuid_getfamily(CPU);
		ASSERT(family == 0xf || family == 0x10 || family == 0x11);
#endif	/* DEBUG */

		/*
		 * Obtain the number of nodes in the system from
		 * bits [6:4] of the Node ID register on node 0.
		 *
		 * The actual node count is NodeID[6:4] + 1.
		 *
		 * The Node ID register is accessed via function 0,
		 * offset 0x60. Node 0 is device 24.
		 */
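		/*
		 * For example, a register value with NodeID[6:4] == 3
		 * decodes to four nodes.
		 */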
		nnodes = ((pci_getl_func(0, 24, 0, 0x60) & 0x70) >> 4) + 1;
	}
	return (nnodes);
}
uint_t
do_erratum_298(struct cpu *cpu)
{
	static int osvwrc = -3;
	extern int osvw_opteron_erratum(cpu_t *, uint_t);

	/*
	 * L2 Eviction May Occur During Processor Operation To Set
	 * Accessed or Dirty Bit.
	 */
	if (osvwrc == -3) {
		osvwrc = osvw_opteron_erratum(cpu, 298);
	} else {
		/* osvw return codes should be consistent for all cpus */
		ASSERT(osvwrc == osvw_opteron_erratum(cpu, 298));
	}

	switch (osvwrc) {
	case 0:		/* erratum is not present: do nothing */
		break;
	case 1:		/* erratum is present: BIOS workaround applied */
		/*
		 * check if workaround is actually in place and issue warning
		 * if not.
		 */
		if (((rdmsr(MSR_AMD_HWCR) & AMD_HWCR_TLBCACHEDIS) == 0) ||
		    ((rdmsr(MSR_AMD_BU_CFG) & AMD_BU_CFG_E298) == 0)) {
#if defined(OPTERON_ERRATUM_298)
			opteron_erratum_298++;
#else
			workaround_warning(cpu, 298);
			return (1);
#endif
		}
		break;
	case -1:	/* cannot determine via osvw: check cpuid */
		if ((cpuid_opteron_erratum(cpu, 298) > 0) &&
		    (((rdmsr(MSR_AMD_HWCR) & AMD_HWCR_TLBCACHEDIS) == 0) ||
		    ((rdmsr(MSR_AMD_BU_CFG) & AMD_BU_CFG_E298) == 0))) {
#if defined(OPTERON_ERRATUM_298)
			opteron_erratum_298++;
#else
			workaround_warning(cpu, 298);
			return (1);
#endif
		}
		break;
	}
	return (0);
}
uint_t
workaround_errata(struct cpu *cpu)
{
	uint_t missing = 0;

	ASSERT(cpu == CPU);

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 88) > 0) {
		/*
		 * SWAPGS May Fail To Read Correct GS Base
		 */
#if defined(OPTERON_ERRATUM_88)
		/*
		 * The workaround is an mfence in the relevant assembler code
		 */
		opteron_erratum_88++;
#else
		workaround_warning(cpu, 88);
		missing++;
#endif
	}

	if (cpuid_opteron_erratum(cpu, 91) > 0) {
		/*
		 * Software Prefetches May Report A Page Fault
		 */
#if defined(OPTERON_ERRATUM_91)
		/*
		 * fix is in trap.c
		 */
		opteron_erratum_91++;
#else
		workaround_warning(cpu, 91);
		missing++;
#endif
	}

	if (cpuid_opteron_erratum(cpu, 93) > 0) {
		/*
		 * RSM Auto-Halt Restart Returns to Incorrect RIP
		 */
#if defined(OPTERON_ERRATUM_93)
		/*
		 * fix is in trap.c
		 */
		opteron_erratum_93++;
#else
		workaround_warning(cpu, 93);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 95) > 0) {
		/*
		 * RET Instruction May Return to Incorrect EIP
		 */
#if defined(OPTERON_ERRATUM_95)
#if defined(_LP64)
		/*
		 * Workaround this by ensuring that 32-bit user code and
		 * 64-bit kernel code never occupy the same address
		 * range mod 4G.
		 */
		if (_userlimit32 > 0xc0000000ul)
			*(uintptr_t *)&_userlimit32 = 0xc0000000ul;

		/*LINTED*/
		ASSERT((uint32_t)COREHEAP_BASE == 0xc0000000u);
		opteron_erratum_95++;
#endif	/* _LP64 */
#else
		workaround_warning(cpu, 95);
		missing++;
#endif
	}

	if (cpuid_opteron_erratum(cpu, 100) > 0) {
		/*
		 * Compatibility Mode Branches Transfer to Illegal Address
		 */
#if defined(OPTERON_ERRATUM_100)
		/*
		 * fix is in trap.c
		 */
		opteron_erratum_100++;
#else
		workaround_warning(cpu, 100);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 108) > 0) {
		/*
		 * CPUID Instruction May Return Incorrect Model Number In
		 * Some Processors
		 */
#if defined(OPTERON_ERRATUM_108)
		/*
		 * (Our cpuid-handling code corrects the model number on
		 * those processors)
		 */
#else
		workaround_warning(cpu, 108);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 109) > 0) do {
		/*
		 * Certain Reverse REP MOVS May Produce Unpredictable Behavior
		 */
#if defined(OPTERON_ERRATUM_109)
		/*
		 * The "workaround" is to print a warning to upgrade the BIOS
		 */
		uint64_t value;
		const uint_t msr = MSR_AMD_PATCHLEVEL;
		int err;

		if ((err = checked_rdmsr(msr, &value)) != 0) {
			msr_warning(cpu, "rd", msr, err);
			workaround_warning(cpu, 109);
			missing++;
		}
		if (value == 0)
			opteron_erratum_109++;
#else
		workaround_warning(cpu, 109);
		missing++;
#endif
	/*CONSTANTCONDITION*/
	} while (0);

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 121) > 0) {
		/*
		 * Sequential Execution Across Non-Canonical Boundary Caused
		 * Processor Hang
		 */
#if defined(OPTERON_ERRATUM_121)
#if defined(_LP64)
		/*
		 * Erratum 121 is only present in long (64 bit) mode.
		 * Workaround is to include the page immediately before the
		 * va hole to eliminate the possibility of system hangs due to
		 * sequential execution across the va hole boundary.
		 */
		if (opteron_erratum_121)
			opteron_erratum_121++;
		else {
			if (hole_start) {
				hole_start -= PAGESIZE;
			} else {
				/*
				 * hole_start not yet initialized by
				 * mmu_init. Initialize hole_start
				 * with value to be subtracted.
				 */
				hole_start = PAGESIZE;
			}
			opteron_erratum_121++;
		}
#endif	/* _LP64 */
#else
		workaround_warning(cpu, 121);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 122) > 0) do {
		/*
		 * TLB Flush Filter May Cause Coherency Problem in
		 * Multiprocessor Systems
		 */
#if defined(OPTERON_ERRATUM_122)
		uint64_t value;
		const uint_t msr = MSR_AMD_HWCR;
		int error;

		/*
		 * Erratum 122 is only present in MP configurations (multi-core
		 * or multi-processor).
		 */
#if defined(__xpv)
		if (!DOMAIN_IS_INITDOMAIN(xen_info))
			break;
		if (!opteron_erratum_122 && xpv_nr_phys_cpus() == 1)
			break;
#else
		if (!opteron_erratum_122 && opteron_get_nnodes() == 1 &&
		    cpuid_get_ncpu_per_chip(cpu) == 1)
			break;
#endif
		/* disable TLB Flush Filter */

		if ((error = checked_rdmsr(msr, &value)) != 0) {
			msr_warning(cpu, "rd", msr, error);
			workaround_warning(cpu, 122);
			missing++;
		} else {
			value |= (uint64_t)AMD_HWCR_FFDIS;
			if ((error = checked_wrmsr(msr, value)) != 0) {
				msr_warning(cpu, "wr", msr, error);
				workaround_warning(cpu, 122);
				missing++;
			}
		}
		opteron_erratum_122++;
#else
		workaround_warning(cpu, 122);
		missing++;
#endif
	/*CONSTANTCONDITION*/
	} while (0);

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 123) > 0) do {
		/*
		 * Bypassed Reads May Cause Data Corruption or System Hang in
		 * Dual Core Processors
		 */
#if defined(OPTERON_ERRATUM_123)
		uint64_t value;
		const uint_t msr = MSR_AMD_PATCHLEVEL;
		int err;

		/*
		 * Erratum 123 applies only to multi-core cpus.
		 */
		if (cpuid_get_ncpu_per_chip(cpu) < 2)
			break;
#if defined(__xpv)
		if (!DOMAIN_IS_INITDOMAIN(xen_info))
			break;
#endif
		/*
		 * The "workaround" is to print a warning to upgrade the BIOS
		 */
		if ((err = checked_rdmsr(msr, &value)) != 0) {
			msr_warning(cpu, "rd", msr, err);
			workaround_warning(cpu, 123);
			missing++;
		}
		if (value == 0)
			opteron_erratum_123++;
#else
		workaround_warning(cpu, 123);
		missing++;

#endif
	/*CONSTANTCONDITION*/
	} while (0);

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 131) > 0) do {
		/*
		 * Multiprocessor Systems with Four or More Cores May Deadlock
		 * Waiting for a Probe Response
		 */
#if defined(OPTERON_ERRATUM_131)
		uint64_t nbcfg;
		const uint_t msr = MSR_AMD_NB_CFG;
		const uint64_t wabits =
		    AMD_NB_CFG_SRQ_HEARTBEAT | AMD_NB_CFG_SRQ_SPR;
		int error;

		/*
		 * Erratum 131 applies to any system with four or more cores.
		 */
		if (opteron_erratum_131)
			break;
#if defined(__xpv)
		if (!DOMAIN_IS_INITDOMAIN(xen_info))
			break;
		if (xpv_nr_phys_cpus() < 4)
			break;
#else
		if (opteron_get_nnodes() * cpuid_get_ncpu_per_chip(cpu) < 4)
			break;
#endif
		/*
		 * Print a warning if neither of the workarounds for
		 * erratum 131 is present.
		 */
		if ((error = checked_rdmsr(msr, &nbcfg)) != 0) {
			msr_warning(cpu, "rd", msr, error);
			workaround_warning(cpu, 131);
			missing++;
		} else if ((nbcfg & wabits) == 0) {
			opteron_erratum_131++;
		} else {
			/* cannot have both workarounds set */
			ASSERT((nbcfg & wabits) != wabits);
		}
#else
		workaround_warning(cpu, 131);
		missing++;
#endif
	/*CONSTANTCONDITION*/
	} while (0);

	/*
	 * This isn't really an erratum, but for convenience the
	 * detection/workaround code lives here and in cpuid_opteron_erratum.
	 */
	if (cpuid_opteron_erratum(cpu, 6336786) > 0) {
#if defined(OPTERON_WORKAROUND_6336786)
		/*
		 * Disable C1-Clock ramping on multi-core/multi-processor
		 * K8 platforms to guard against TSC drift.
		 */
		if (opteron_workaround_6336786) {
			opteron_workaround_6336786++;
#if defined(__xpv)
		} else if ((DOMAIN_IS_INITDOMAIN(xen_info) &&
		    xpv_nr_phys_cpus() > 1) ||
		    opteron_workaround_6336786_UP) {
			/*
			 * XXPV	Hmm. We can't walk the Northbridges on
			 *	the hypervisor; so just complain and drive
			 *	on. This probably needs to be fixed in
			 *	the hypervisor itself.
			 */
			opteron_workaround_6336786++;
			workaround_warning(cpu, 6336786);
#else	/* __xpv */
		} else if ((opteron_get_nnodes() *
		    cpuid_get_ncpu_per_chip(cpu) > 1) ||
		    opteron_workaround_6336786_UP) {

			uint_t	node, nnodes;
			uint8_t data;

			nnodes = opteron_get_nnodes();
			for (node = 0; node < nnodes; node++) {
				/*
				 * Clear PMM7[1:0] (function 3, offset 0x87)
				 * Northbridge device is the node id + 24.
				 */
				data = pci_getb_func(0, node + 24, 3, 0x87);
				data &= 0xFC;
				pci_putb_func(0, node + 24, 3, 0x87, data);
			}
			opteron_workaround_6336786++;
#endif	/* __xpv */
		}
#else
		workaround_warning(cpu, 6336786);
		missing++;
#endif
	}

	/*LINTED*/
	/*
	 * Mutex primitives don't work as expected.
	 */
	if (cpuid_opteron_erratum(cpu, 6323525) > 0) {
#if defined(OPTERON_WORKAROUND_6323525)
		/*
		 * This problem only occurs with 2 or more cores. If bit in
		 * MSR_AMD_BU_CFG set, then not applicable. The workaround
		 * is to patch the semaphore routines with the lfence
		 * instruction to provide necessary load memory barrier with
		 * possible subsequent read-modify-write ops.
		 *
		 * It is too early in boot to call the patch routine so
		 * set erratum variable to be done in startup_end().
		 */
		if (opteron_workaround_6323525) {
			opteron_workaround_6323525++;
#if defined(__xpv)
		} else if (is_x86_feature(x86_featureset, X86FSET_SSE2)) {
			if (DOMAIN_IS_INITDOMAIN(xen_info)) {
				/*
				 * XXPV	Use dom0_msr here when extended
				 *	operations are supported?
				 */
				if (xpv_nr_phys_cpus() > 1)
					opteron_workaround_6323525++;
			} else {
				/*
				 * We have no way to tell how many physical
				 * cpus there are, or even if this processor
				 * has the problem, so enable the workaround
				 * unconditionally (at some performance cost).
				 */
				opteron_workaround_6323525++;
			}
#else	/* __xpv */
		} else if (is_x86_feature(x86_featureset, X86FSET_SSE2) &&
		    ((opteron_get_nnodes() *
		    cpuid_get_ncpu_per_chip(cpu)) > 1)) {
			if ((xrdmsr(MSR_AMD_BU_CFG) & (UINT64_C(1) << 33)) == 0)
				opteron_workaround_6323525++;
#endif	/* __xpv */
		}
#else
		workaround_warning(cpu, 6323525);
		missing++;
#endif
	}

	missing += do_erratum_298(cpu);

	if (cpuid_opteron_erratum(cpu, 721) > 0) {
#if defined(OPTERON_ERRATUM_721)
		on_trap_data_t otd;

		if (!on_trap(&otd, OT_DATA_ACCESS))
			wrmsr(MSR_AMD_DE_CFG,
			    rdmsr(MSR_AMD_DE_CFG) | AMD_DE_CFG_E721);
		no_trap();

		opteron_erratum_721++;
#else
		workaround_warning(cpu, 721);
		missing++;
#endif
	}

#ifdef	__xpv
	return (0);
#else
	return (missing);
#endif
}
void
workaround_errata_end()
{
#if defined(OPTERON_ERRATUM_88)
	if (opteron_erratum_88)
		workaround_applied(88);
#endif
#if defined(OPTERON_ERRATUM_91)
	if (opteron_erratum_91)
		workaround_applied(91);
#endif
#if defined(OPTERON_ERRATUM_93)
	if (opteron_erratum_93)
		workaround_applied(93);
#endif
#if defined(OPTERON_ERRATUM_95)
	if (opteron_erratum_95)
		workaround_applied(95);
#endif
#if defined(OPTERON_ERRATUM_100)
	if (opteron_erratum_100)
		workaround_applied(100);
#endif
#if defined(OPTERON_ERRATUM_108)
	if (opteron_erratum_108)
		workaround_applied(108);
#endif
#if defined(OPTERON_ERRATUM_109)
	if (opteron_erratum_109) {
		cmn_err(CE_WARN,
		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
		    " processor\nerratum 109 was not detected; updating your"
		    " system's BIOS to a version\ncontaining this"
		    " microcode patch is HIGHLY recommended or erroneous"
		    " system\noperation may occur.\n");
	}
#endif
#if defined(OPTERON_ERRATUM_121)
	if (opteron_erratum_121)
		workaround_applied(121);
#endif
#if defined(OPTERON_ERRATUM_122)
	if (opteron_erratum_122)
		workaround_applied(122);
#endif
#if defined(OPTERON_ERRATUM_123)
	if (opteron_erratum_123) {
		cmn_err(CE_WARN,
		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
		    " processor\nerratum 123 was not detected; updating your"
		    " system's BIOS to a version\ncontaining this"
		    " microcode patch is HIGHLY recommended or erroneous"
		    " system\noperation may occur.\n");
	}
#endif
#if defined(OPTERON_ERRATUM_131)
	if (opteron_erratum_131) {
		cmn_err(CE_WARN,
		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
		    " processor\nerratum 131 was not detected; updating your"
		    " system's BIOS to a version\ncontaining this"
		    " microcode patch is HIGHLY recommended or erroneous"
		    " system\noperation may occur.\n");
	}
#endif
#if defined(OPTERON_WORKAROUND_6336786)
	if (opteron_workaround_6336786)
		workaround_applied(6336786);
#endif
#if defined(OPTERON_WORKAROUND_6323525)
	if (opteron_workaround_6323525)
		workaround_applied(6323525);
#endif
#if defined(OPTERON_ERRATUM_298)
	if (opteron_erratum_298) {
		cmn_err(CE_WARN,
		    "BIOS microcode patch for AMD 64/Opteron(tm)"
		    " processor\nerratum 298 was not detected; updating your"
		    " system's BIOS to a version\ncontaining this"
		    " microcode patch is HIGHLY recommended or erroneous"
		    " system\noperation may occur.\n");
	}
#endif
#if defined(OPTERON_ERRATUM_721)
	if (opteron_erratum_721)
		workaround_applied(721);
#endif
}
/*
 * The procset_slave and procset_master are used to synchronize
 * between the control CPU and the target CPU when starting CPUs.
 */
static cpuset_t procset_slave, procset_master;
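
/*
 * Both sides use the same handshake: the signalling CPU sets its bit in
 * the set and spins until the peer clears it (mp_startup_signal), while
 * the waiting CPU spins until the bit appears and then clears it
 * (mp_startup_wait).
 */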
static void
mp_startup_wait(cpuset_t *sp, processorid_t cpuid)
{
	cpuset_t tempset;

	for (tempset = *sp; !CPU_IN_SET(tempset, cpuid);
	    tempset = *(volatile cpuset_t *)sp) {
		SMT_PAUSE();
	}
	CPUSET_ATOMIC_DEL(*(cpuset_t *)sp, cpuid);
}

static void
mp_startup_signal(cpuset_t *sp, processorid_t cpuid)
{
	cpuset_t tempset;

	CPUSET_ATOMIC_ADD(*(cpuset_t *)sp, cpuid);
	for (tempset = *sp; CPU_IN_SET(tempset, cpuid);
	    tempset = *(volatile cpuset_t *)sp) {
		SMT_PAUSE();
	}
}
int
mp_start_cpu_common(cpu_t *cp, boolean_t boot)
{
	_NOTE(ARGUNUSED(boot));

	void *ctx;
	int delays;
	int error = 0;
	cpuset_t tempset;
	processorid_t cpuid;
#ifndef __xpv
	extern void cpupm_init(cpu_t *);
#endif

	ASSERT(cp != NULL);
	cpuid = cp->cpu_id;
	ctx = mach_cpucontext_alloc(cp);
	if (ctx == NULL) {
		cmn_err(CE_WARN,
		    "cpu%d: failed to allocate context", cp->cpu_id);
		return (EAGAIN);
	}
	error = mach_cpu_start(cp, ctx);
	if (error != 0) {
		cmn_err(CE_WARN,
		    "cpu%d: failed to start, error %d", cp->cpu_id, error);
		mach_cpucontext_free(cp, ctx, error);
		return (error);
	}

	for (delays = 0, tempset = procset_slave; !CPU_IN_SET(tempset, cpuid);
	    delays++) {
		if (delays == 500) {
			/*
			 * After five seconds, things are probably looking
			 * a bit bleak - explain the hang.
			 */
			cmn_err(CE_NOTE, "cpu%d: started, "
			    "but not running in the kernel yet", cpuid);
		} else if (delays > 2000) {
			/*
			 * We waited at least 20 seconds, bail ..
			 */
			error = ETIMEDOUT;
			cmn_err(CE_WARN, "cpu%d: timed out", cpuid);
			mach_cpucontext_free(cp, ctx, error);
			return (error);
		}

		/*
		 * wait at least 10ms, then check again..
		 */
		delay(USEC_TO_TICK_ROUNDUP(10000));
		tempset = *((volatile cpuset_t *)&procset_slave);
	}
	CPUSET_ATOMIC_DEL(procset_slave, cpuid);

	mach_cpucontext_free(cp, ctx, 0);

#ifndef __xpv
	if (tsc_gethrtime_enable)
		tsc_sync_master(cpuid);
#endif

	if (dtrace_cpu_init != NULL) {
		(*dtrace_cpu_init)(cpuid);
	}

	/*
	 * During CPU DR operations, the cpu_lock is held by current
	 * (the control) thread. We can't release the cpu_lock here
	 * because that will break the CPU DR logic.
	 * On the other hand, CPUPM and processor group initialization
	 * routines need to access the cpu_lock. So we invoke those
	 * routines here on behalf of mp_startup_common().
	 *
	 * CPUPM and processor group initialization routines depend
	 * on the cpuid probing results. Wait for mp_startup_common()
	 * to signal that cpuid probing is done.
	 */
	mp_startup_wait(&procset_slave, cpuid);
#ifndef __xpv
	cpupm_init(cp);
#endif
	(void) pg_cpu_init(cp, B_FALSE);
	cpu_set_state(cp);
	mp_startup_signal(&procset_master, cpuid);

	return (0);
}
/*
 * Start a single cpu, assuming that the kernel context is available
 * to successfully start another cpu.
 *
 * (For example, real mode code is mapped into the right place
 * in memory and is ready to be run.)
 */
int
start_cpu(processorid_t who)
{
	cpu_t *cp;
	int error = 0;
	cpuset_t tempset;

	ASSERT(who != 0);

	/*
	 * Check if there's at least a Mbyte of kmem available
	 * before attempting to start the cpu.
	 */
	if (kmem_avail() < 1024 * 1024) {
		/*
		 * Kick off a reap in case that helps us with
		 * later attempts ..
		 */
		kmem_reap();
		return (ENOMEM);
	}

	/*
	 * First configure cpu.
	 */
	cp = mp_cpu_configure_common(who, B_TRUE);
	ASSERT(cp != NULL);

	/*
	 * Then start cpu.
	 */
	error = mp_start_cpu_common(cp, B_TRUE);
	if (error != 0) {
		mp_cpu_unconfigure_common(cp, error);
		return (error);
	}

	mutex_exit(&cpu_lock);
	tempset = cpu_ready_set;
	while (!CPU_IN_SET(tempset, who)) {
		drv_usecwait(1);
		tempset = *((volatile cpuset_t *)&cpu_ready_set);
	}
	mutex_enter(&cpu_lock);

	return (0);
}
void
start_other_cpus(int cprboot)
{
	_NOTE(ARGUNUSED(cprboot));

	uint_t who;
	uint_t bootcpuid = 0;

	/*
	 * Initialize our own cpu_info.
	 */
	init_cpu_info(CPU);

#if !defined(__xpv)
	init_cpu_id_gdt(CPU);
#endif

	cmn_err(CE_CONT, "?cpu%d: %s\n", CPU->cpu_id, CPU->cpu_idstr);
	cmn_err(CE_CONT, "?cpu%d: %s\n", CPU->cpu_id, CPU->cpu_brandstr);

	/*
	 * KPTI initialisation happens very early in boot, before logging is
	 * set up. Output a status message now as the boot CPU comes online.
	 */
	cmn_err(CE_CONT, "?KPTI %s (PCID %s, INVPCID %s)\n",
	    kpti_enable ? "enabled" : "disabled",
	    x86_use_pcid == 1 ? "in use" :
	    (is_x86_feature(x86_featureset, X86FSET_PCID) ? "disabled" :
	    "not supported"),
	    x86_use_pcid == 1 && x86_use_invpcid == 1 ? "in use" :
	    (is_x86_feature(x86_featureset, X86FSET_INVPCID) ? "disabled" :
	    "not supported"));

	/*
	 * Initialize our syscall handlers
	 */
	init_cpu_syscall(CPU);

	/*
	 * Take the boot cpu out of the mp_cpus set because we know
	 * it's already running. Add it to the cpu_ready_set for
	 * precisely the same reason.
	 */
	CPUSET_DEL(mp_cpus, bootcpuid);
	CPUSET_ADD(cpu_ready_set, bootcpuid);

	/*
	 * skip the rest of this if
	 * . only 1 cpu detected and system isn't hotplug-capable
	 * . not using MP
	 */
	if ((CPUSET_ISNULL(mp_cpus) && plat_dr_support_cpu() == 0) ||
	    use_mp == 0) {
		if (use_mp == 0)
			cmn_err(CE_CONT, "?***** Not in MP mode\n");
		goto done;
	}

	/*
	 * perform such initialization as is needed
	 * to be able to take CPUs on- and off-line.
	 */
	cpu_pause_init();

	xc_init_cpu(CPU);		/* initialize processor crosscalls */

	if (mach_cpucontext_init() != 0)
		goto done;

	flushes_require_xcalls = 1;

	/*
	 * We lock our affinity to the master CPU to ensure that all slave CPUs
	 * do their TSC syncs with the same CPU.
	 */
	affinity_set(CPU_CURRENT);

	for (who = 0; who < NCPU; who++) {
		if (!CPU_IN_SET(mp_cpus, who))
			continue;
		ASSERT(who != bootcpuid);

		mutex_enter(&cpu_lock);
		if (start_cpu(who) != 0)
			CPUSET_DEL(mp_cpus, who);
		cpu_state_change_notify(who, CPU_SETUP);
		mutex_exit(&cpu_lock);
	}

	/* Free the space allocated to hold the microcode file */
	ucode_cleanup();

	affinity_clear();

	mach_cpucontext_fini();

done:
	if (get_hwenv() == HW_NATIVE)
		workaround_errata_end();
	cmi_post_mpstartup();

	if (use_mp && ncpus != boot_max_ncpus) {
		cmn_err(CE_NOTE,
		    "System detected %d cpus, but "
		    "only %d cpu(s) were enabled during boot.",
		    boot_max_ncpus, ncpus);
		cmn_err(CE_NOTE,
		    "Use \"boot-ncpus\" parameter to enable more CPU(s). "
		    "See eeprom(1M).");
	}
}
int
mp_cpu_configure(int cpuid)
{
	cpu_t *cp;

	if (use_mp == 0 || plat_dr_support_cpu() == 0) {
		return (ENOTSUP);
	}

	cp = cpu_get(cpuid);
	if (cp != NULL) {
		return (EALREADY);
	}

	/*
	 * Check if there's at least a Mbyte of kmem available
	 * before attempting to start the cpu.
	 */
	if (kmem_avail() < 1024 * 1024) {
		/*
		 * Kick off a reap in case that helps us with
		 * later attempts ..
		 */
		kmem_reap();
		return (ENOMEM);
	}

	cp = mp_cpu_configure_common(cpuid, B_FALSE);
	ASSERT(cp != NULL && cpu_get(cpuid) == cp);

	return (cp != NULL ? 0 : EAGAIN);
}

int
mp_cpu_unconfigure(int cpuid)
{
	cpu_t *cp;

	if (use_mp == 0 || plat_dr_support_cpu() == 0) {
		return (ENOTSUP);
	} else if (cpuid < 0 || cpuid >= max_ncpus) {
		return (EINVAL);
	}

	cp = cpu_get(cpuid);
	if (cp == NULL) {
		return (ENODEV);
	}
	mp_cpu_unconfigure_common(cp, 0);

	return (0);
}
/*
 * Startup function for 'other' CPUs (besides boot cpu).
 * Called from real_mode_start.
 *
 * WARNING: until CPU_READY is set, mp_startup_common and routines called by
 * mp_startup_common should not call routines (e.g. kmem_free) that could call
 * hat_unload which requires CPU_READY to be set.
 */
static void
mp_startup_common(boolean_t boot)
{
	cpu_t *cp = CPU;
	uchar_t new_x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];
	extern void cpu_event_init_cpu(cpu_t *);

	/*
	 * We need to get TSC on this proc synced (i.e., any delta
	 * from cpu0 accounted for) as soon as we can, because many
	 * many things use gethrtime/pc_gethrestime, including
	 * interrupts, cmn_err, etc. Before we can do that, we want to
	 * clear TSC if we're on a buggy Sandy/Ivy Bridge CPU, so do that
	 * right away.
	 */
	bzero(new_x86_featureset, BT_SIZEOFMAP(NUM_X86_FEATURES));
	cpuid_pass1(cp, new_x86_featureset);

	if (boot && get_hwenv() == HW_NATIVE &&
	    cpuid_getvendor(CPU) == X86_VENDOR_Intel &&
	    cpuid_getfamily(CPU) == 6 &&
	    (cpuid_getmodel(CPU) == 0x2d || cpuid_getmodel(CPU) == 0x3e) &&
	    is_x86_feature(new_x86_featureset, X86FSET_TSC)) {
		(void) wrmsr(REG_TSC, 0UL);
	}

	/* Let the control CPU continue into tsc_sync_master() */
	mp_startup_signal(&procset_slave, cp->cpu_id);

#ifndef __xpv
	if (tsc_gethrtime_enable)
		tsc_sync_slave();
#endif
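
	/*
	 * The tsc_sync_slave() call above pairs with the tsc_sync_master()
	 * call made by the control CPU in mp_start_cpu_common(), recording
	 * any TSC delta for this CPU before anything here depends on
	 * gethrtime().
	 */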
	/*
	 * Once this was done from assembly, but it's safer here; if
	 * it blocks, we need to be able to swtch() to and from, and
	 * since we get here by calling t_pc, we need to do that call
	 * before swtch() overwrites it.
	 */
	(void) (*ap_mlsetup)();

#ifndef __xpv
	/*
	 * Program this cpu's PAT
	 */
	pat_sync();
#endif

	/*
	 * Set up TSC_AUX to contain the cpuid for this processor
	 * for the rdtscp instruction.
	 */
	if (is_x86_feature(x86_featureset, X86FSET_TSCP))
		(void) wrmsr(MSR_AMD_TSCAUX, cp->cpu_id);

	/*
	 * Initialize this CPU's syscall handlers
	 */
	init_cpu_syscall(cp);

	/*
	 * Enable interrupts with spl set to LOCK_LEVEL. LOCK_LEVEL is the
	 * highest level at which a routine is permitted to block on
	 * an adaptive mutex (allows for cpu poke interrupt in case
	 * the cpu is blocked on a mutex and halts). Setting LOCK_LEVEL blocks
	 * device interrupts that may end up in the hat layer issuing cross
	 * calls before CPU_READY is set.
	 */
	splx(ipltospl(LOCK_LEVEL));
	sti();

	/*
	 * Do a sanity check to make sure this new CPU is a sane thing
	 * to add to the collection of processors running this system.
	 *
	 * XXX	Clearly this needs to get more sophisticated, if x86
	 * systems start to get built out of heterogenous CPUs; as is
	 * likely to happen once the number of processors in a configuration
	 * gets large enough.
	 */
	if (compare_x86_featureset(x86_featureset, new_x86_featureset) ==
	    B_FALSE) {
		cmn_err(CE_CONT, "cpu%d: featureset\n", cp->cpu_id);
		print_x86_featureset(new_x86_featureset);
		cmn_err(CE_WARN, "cpu%d feature mismatch", cp->cpu_id);
	}

	/*
	 * There exists a small subset of systems which expose differing
	 * MWAIT/MONITOR support between CPUs. If MWAIT support is absent from
	 * the boot CPU, but is found on a later CPU, the system continues to
	 * operate as if no MWAIT support is available.
	 *
	 * The reverse case, where MWAIT is available on the boot CPU but not
	 * on a subsequently initialized CPU, is not presently allowed and will
	 * result in a panic.
	 */
	if (is_x86_feature(x86_featureset, X86FSET_MWAIT) !=
	    is_x86_feature(new_x86_featureset, X86FSET_MWAIT)) {
		if (!is_x86_feature(x86_featureset, X86FSET_MWAIT)) {
			remove_x86_feature(new_x86_featureset, X86FSET_MWAIT);
		} else {
			panic("unsupported mixed cpu mwait support detected");
		}
	}

	/*
	 * We could be more sophisticated here, and just mark the CPU
	 * as "faulted" but at this point we'll opt for the easier
	 * answer of dying horribly. Provided the boot cpu is ok,
	 * the system can be recovered by booting with use_mp set to zero.
	 */
	if (workaround_errata(cp) != 0)
		panic("critical workaround(s) missing for cpu%d", cp->cpu_id);

	/*
	 * We can touch cpu_flags here without acquiring the cpu_lock here
	 * because the cpu_lock is held by the control CPU which is running
	 * mp_start_cpu_common().
	 * Need to clear CPU_QUIESCED flag before calling any function which
	 * may cause thread context switching, such as kmem_alloc() etc.
	 * The idle thread checks for CPU_QUIESCED flag and loops for ever if
	 * it's set. So the startup thread may have no chance to switch back
	 * again if it's switched away with CPU_QUIESCED set.
	 */
	cp->cpu_flags &= ~(CPU_POWEROFF | CPU_QUIESCED);
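
	/*
	 * Enable PCID support on this CPU if the boot CPU decided to use it
	 * (as recorded in x86_use_pcid); with KPTI this lets the %cr3
	 * switches avoid full TLB flushes.
	 */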
	enable_pcid();
	/*
	 * Setup this processor for XSAVE.
	 */
	if (fp_save_mech == FP_XSAVE) {
		xsave_setup_msr(cp);
	}

	cpuid_pass2(cp);
	cpuid_pass3(cp);
	cpuid_pass4(cp, NULL);

	/*
	 * Correct cpu_idstr and cpu_brandstr on target CPU after
	 * cpuid_pass1() is done.
	 */
	(void) cpuid_getidstr(cp, cp->cpu_idstr, CPU_IDSTRLEN);
	(void) cpuid_getbrandstr(cp, cp->cpu_brandstr, CPU_IDSTRLEN);

	cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_EXISTS;

	post_startup_cpu_fixups();

	cpu_event_init_cpu(cp);

	/*
	 * Enable preemption here so that contention for any locks acquired
	 * later in mp_startup_common may be preempted if the thread owning
	 * those locks is continuously executing on other CPUs (for example,
	 * this CPU must be preemptible to allow other CPUs to pause it during
	 * their startup phases). It's safe to enable preemption here because
	 * the CPU state is pretty-much fully constructed.
	 */
	curthread->t_preempt = 0;

	/* The base spl should still be at LOCK LEVEL here */
	ASSERT(cp->cpu_base_spl == ipltospl(LOCK_LEVEL));
	set_base_spl();		/* Restore the spl to its proper value */

	pghw_physid_create(cp);
	/*
	 * Delegate initialization tasks, which need to access the cpu_lock,
	 * to mp_start_cpu_common() because we can't acquire the cpu_lock here
	 * during CPU DR operations.
	 */
	mp_startup_signal(&procset_slave, cp->cpu_id);
	mp_startup_wait(&procset_master, cp->cpu_id);
	pg_cmt_cpu_startup(cp);

	if (boot) {
		mutex_enter(&cpu_lock);
		cp->cpu_flags &= ~CPU_OFFLINE;
		cpu_enable_intr(cp);
		cpu_add_active(cp);
		mutex_exit(&cpu_lock);
	}

	/* Enable interrupts */
	(void) spl0();

	/*
	 * Fill out cpu_ucode_info. Update microcode if necessary.
	 */
	ucode_check(cp);

#ifndef __xpv
	{
		/*
		 * Set up the CPU module for this CPU. This can't be done
		 * before this CPU is made CPU_READY, because we may (in
		 * heterogeneous systems) need to go load another CPU module.
		 * The act of attempting to load a module may trigger a
		 * cross-call, which will ASSERT unless this cpu is CPU_READY.
		 */
		cmi_hdl_t hdl;

		if ((hdl = cmi_init(CMI_HDL_NATIVE, cmi_ntv_hwchipid(CPU),
		    cmi_ntv_hwcoreid(CPU), cmi_ntv_hwstrandid(CPU))) != NULL) {
			if (is_x86_feature(x86_featureset, X86FSET_MCA))
				cmi_mca_init(hdl);
			cp->cpu_m.mcpu_cmi_hdl = hdl;
		}
	}
#endif /* __xpv */

	if (boothowto & RB_DEBUG)
		kdi_cpu_init();

	/*
	 * Setting the bit in cpu_ready_set must be the last operation in
	 * processor initialization; the boot CPU will continue to boot once
	 * it sees this bit set for all active CPUs.
	 */
	CPUSET_ATOMIC_ADD(cpu_ready_set, cp->cpu_id);

	(void) mach_cpu_create_device_node(cp, NULL);

	cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_idstr);
	cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_brandstr);
	cmn_err(CE_CONT, "?cpu%d initialization complete - online\n",
	    cp->cpu_id);

	/*
	 * Now we are done with the startup thread, so free it up.
	 */
	thread_exit();
	panic("mp_startup: cannot return");
	/*NOTREACHED*/
}
/*
 * Startup function for 'other' CPUs at boot time (besides boot cpu).
 */
static void
mp_startup_boot(void)
{
	mp_startup_common(B_TRUE);
}

/*
 * Startup function for hotplug CPUs at runtime.
 */
void
mp_startup_hotplug(void)
{
	mp_startup_common(B_FALSE);
}
/*
 * Start CPU on user request.
 */
/* ARGSUSED */
int
mp_cpu_start(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (0);
}

/*
 * Stop CPU on user request.
 */
int
mp_cpu_stop(struct cpu *cp)
{
	extern int cbe_psm_timer_mode;
	ASSERT(MUTEX_HELD(&cpu_lock));

#ifdef __xpv
	/*
	 * We can't offline vcpu0.
	 */
	if (cp->cpu_id == 0)
		return (EBUSY);
#endif

	/*
	 * If TIMER_PERIODIC mode is used, CPU0 is the one running it;
	 * can't stop it. (This is true only for machines with no TSC.)
	 */
	if ((cbe_psm_timer_mode == TIMER_PERIODIC) && (cp->cpu_id == 0))
		return (EBUSY);

	return (0);
}
/*
 * Take the specified CPU out of participation in interrupts.
 */
int
cpu_disable_intr(struct cpu *cp)
{
	if (psm_disable_intr(cp->cpu_id) != DDI_SUCCESS)
		return (EBUSY);

	cp->cpu_flags &= ~CPU_ENABLE;
	return (0);
}

/*
 * Allow the specified CPU to participate in interrupts.
 */
void
cpu_enable_intr(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	cp->cpu_flags |= CPU_ENABLE;
	psm_enable_intr(cp->cpu_id);
}
void
mp_cpu_faulted_enter(struct cpu *cp)
{
#ifdef __xpv
	_NOTE(ARGUNUSED(cp));
#else
	cmi_hdl_t hdl = cp->cpu_m.mcpu_cmi_hdl;

	if (hdl != NULL) {
		cmi_hdl_hold(hdl);
	} else {
		hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
		    cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp));
	}
	if (hdl != NULL) {
		cmi_faulted_enter(hdl);
		cmi_hdl_rele(hdl);
	}
#endif
}

void
mp_cpu_faulted_exit(struct cpu *cp)
{
#ifdef __xpv
	_NOTE(ARGUNUSED(cp));
#else
	cmi_hdl_t hdl = cp->cpu_m.mcpu_cmi_hdl;

	if (hdl != NULL) {
		cmi_hdl_hold(hdl);
	} else {
		hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
		    cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp));
	}
	if (hdl != NULL) {
		cmi_faulted_exit(hdl);
		cmi_hdl_rele(hdl);
	}
#endif
}
/*
 * The following two routines are used as context operators on threads belonging
 * to processes with a private LDT (see sysi86). Due to the rarity of such
 * processes, these routines are currently written for best code readability and
 * organization rather than speed. We could avoid checking x86_featureset at
 * every context switch by installing different context ops, depending on
 * x86_featureset, at LDT creation time -- one for each combination of fast
 * syscall features.
 */

/*ARGSUSED*/
void
cpu_fast_syscall_disable(void *arg)
{
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_SEP))
		cpu_sep_disable();
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_ASYSC))
		cpu_asysc_disable();
}

/*ARGSUSED*/
void
cpu_fast_syscall_enable(void *arg)
{
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_SEP))
		cpu_sep_enable();
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_ASYSC))
		cpu_asysc_enable();
}

static void
cpu_sep_enable(void)
{
	ASSERT(is_x86_feature(x86_featureset, X86FSET_SEP));
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

	wrmsr(MSR_INTC_SEP_CS, (uint64_t)(uintptr_t)KCS_SEL);
}

static void
cpu_sep_disable(void)
{
	ASSERT(is_x86_feature(x86_featureset, X86FSET_SEP));
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

	/*
	 * Setting the SYSENTER_CS_MSR register to 0 causes software executing
	 * the sysenter or sysexit instruction to trigger a #gp fault.
	 */
	wrmsr(MSR_INTC_SEP_CS, 0);
}

static void
cpu_asysc_enable(void)
{
	ASSERT(is_x86_feature(x86_featureset, X86FSET_ASYSC));
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

	wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) |
	    (uint64_t)(uintptr_t)AMD_EFER_SCE);
}

static void
cpu_asysc_disable(void)
{
	ASSERT(is_x86_feature(x86_featureset, X86FSET_ASYSC));
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

	/*
	 * Turn off the SCE (syscall enable) bit in the EFER register. Software
	 * executing syscall or sysret with this bit off will incur a #ud trap.
	 */
	wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) &
	    ~((uint64_t)(uintptr_t)AMD_EFER_SCE));
}