kernel - Fix lockup due to recent pmap change
dragonfly.git: sys/platform/pc64/x86_64/initcpu.c
blob f941ab2f3e7dcf1fe65b5f265fe7129ef286f213
/*-
 * Copyright (c) KATO Takenori, 1997, 1998.
 * Copyright (c) 2008 The DragonFly Project.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

static int hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");

int     cpu_type;               /* Are we 386, 386sx, 486, etc? */
u_int   cpu_feature;            /* Feature flags */
u_int   cpu_feature2;           /* Feature flags */
u_int   amd_feature;            /* AMD feature flags */
u_int   amd_feature2;           /* AMD feature flags */
u_int   via_feature_rng;        /* VIA RNG features */
u_int   via_feature_xcrypt;     /* VIA ACE features */
u_int   cpu_high;               /* Highest arg to CPUID */
u_int   cpu_exthigh;            /* Highest arg to extended CPUID */
u_int   cpu_id;                 /* Stepping ID */
u_int   cpu_procinfo;           /* HyperThreading Info / Brand Index / CLFLUSH */
u_int   cpu_procinfo2;          /* Multicore info */
char    cpu_vendor[20];         /* CPU Origin code */
u_int   cpu_vendor_id;          /* CPU vendor ID */
u_int   cpu_fxsr;               /* SSE enabled */
u_int   cpu_xsave;              /* AVX enabled by OS */
u_int   cpu_mxcsr_mask;         /* Valid bits in mxcsr */
u_int   cpu_clflush_line_size = 32;     /* Default CLFLUSH line size */
u_int   cpu_stdext_feature;
u_int   cpu_thermal_feature;
u_int   cpu_mwait_feature;
u_int   cpu_mwait_extemu;

/*
 * -1: automatic (enable on h/w, disable on VMs)
 *  0: disable
 *  1: enable (where available)
 */
static int hw_clflush_enable = -1;

SYSCTL_INT(_hw, OID_AUTO, clflush_enable, CTLFLAG_RD, &hw_clflush_enable, 0,
    "");

SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
    &via_feature_rng, 0, "VIA C3/C7 RNG feature available in CPU");
SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
    &via_feature_xcrypt, 0, "VIA C3/C7 xcrypt feature available in CPU");

/*
 * Initialize special VIA C3/C7 features
 */
static void
init_via(void)
{
        u_int regs[4], val;
        u_int64_t msreg;
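
        /*
         * Leaf 0xc0000000 is the Centaur/VIA extended CPUID base: EAX
         * returns the highest supported Centaur leaf, and leaf 0xc0000001
         * reports the PadLock feature/enable flags in EDX.
         */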
        do_cpuid(0xc0000000, regs);
        val = regs[0];
        if (val >= 0xc0000001) {
                do_cpuid(0xc0000001, regs);
                val = regs[3];
        } else
                val = 0;

        /* Enable RNG if present and disabled */
        if (val & VIA_CPUID_HAS_RNG) {
                if (!(val & VIA_CPUID_DO_RNG)) {
                        msreg = rdmsr(0x110B);
                        msreg |= 0x40;
                        wrmsr(0x110B, msreg);
                }
                via_feature_rng = VIA_HAS_RNG;
        }
        /* Enable AES engine if present and disabled */
        if (val & VIA_CPUID_HAS_ACE) {
                if (!(val & VIA_CPUID_DO_ACE)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_AES;
        }
        /* Enable ACE2 engine if present and disabled */
        if (val & VIA_CPUID_HAS_ACE2) {
                if (!(val & VIA_CPUID_DO_ACE2)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_AESCTR;
        }
        /* Enable SHA engine if present and disabled */
        if (val & VIA_CPUID_HAS_PHE) {
                if (!(val & VIA_CPUID_DO_PHE)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28/**/);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_SHA;
        }
        /* Enable MM engine if present and disabled */
        if (val & VIA_CPUID_HAS_PMM) {
                if (!(val & VIA_CPUID_DO_PMM)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28/**/);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_MM;
        }
}

static enum vmm_guest_type
detect_vmm(void)
{
        enum vmm_guest_type guest;
        char vendor[16];

        /*
         * [RFC] CPUID usage for interaction between Hypervisors and Linux.
         * http://lkml.org/lkml/2008/10/1/246
         *
         * KB1009458: Mechanisms to determine if software is running in
         * a VMware virtual machine
         * http://kb.vmware.com/kb/1009458
         */
        if (cpu_feature2 & CPUID2_VMM) {
                u_int regs[4];
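
                /*
                 * Hypervisor CPUID leaf 0x40000000: EAX holds the highest
                 * hypervisor leaf, and EBX/ECX/EDX hold a 12-byte vendor
                 * signature, assembled into vendor[] below.
                 */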
                do_cpuid(0x40000000, regs);
                ((u_int *)&vendor)[0] = regs[1];
                ((u_int *)&vendor)[1] = regs[2];
                ((u_int *)&vendor)[2] = regs[3];
                vendor[12] = '\0';
                if (regs[0] >= 0x40000000) {
                        memcpy(vmm_vendor, vendor, 13);
                        if (strcmp(vmm_vendor, "VMwareVMware") == 0)
                                return VMM_GUEST_VMWARE;
                        else if (strcmp(vmm_vendor, "Microsoft Hv") == 0)
                                return VMM_GUEST_HYPERV;
                        else if (strcmp(vmm_vendor, "KVMKVMKVM") == 0)
                                return VMM_GUEST_KVM;
                } else if (regs[0] == 0) {
                        /* Also detect old KVM versions with regs[0] == 0 */
                        if (strcmp(vendor, "KVMKVMKVM") == 0) {
                                memcpy(vmm_vendor, vendor, 13);
                                return VMM_GUEST_KVM;
                        }
                }
        }

        guest = detect_virtual();
        if (guest == VMM_GUEST_NONE && (cpu_feature2 & CPUID2_VMM))
                guest = VMM_GUEST_UNKNOWN;
        return guest;
}

/*
 * Initialize CPU control registers
 */
void
initializecpu(int cpu)
{
        uint64_t msr;

        /* Check for FXSR and SSE support and enable if available. */
        if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
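                /*
                 * CR4_FXSR (CR4.OSFXSR) lets the kernel use fxsave/fxrstor
                 * for FPU/SSE state; CR4_XMM (CR4.OSXMMEXCPT) enables
                 * unmasked SIMD floating-point exceptions.
                 */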
                load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
                cpu_fxsr = hw_instruction_sse = 1;
        }

        if (cpu == 0) {
                /* Check if we are running in a hypervisor. */
                vmm_guest = detect_vmm();
        }

#if !defined(CPU_DISABLE_AVX)
        /* Check for XSAVE and AVX support and enable if available. */
        if ((cpu_feature2 & CPUID2_AVX) && (cpu_feature2 & CPUID2_XSAVE)
            && (cpu_feature & CPUID_SSE)) {
                load_cr4(rcr4() | CR4_XSAVE);

                /* Adjust size of savefpu in npx.h before adding to mask. */
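                /*
                 * xsetbv() below writes XCR0 so that x87, SSE, and AVX
                 * (YMM) register state is included in xsave/xrstor.
                 */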
                xsetbv(0, CPU_XFEATURE_X87 | CPU_XFEATURE_SSE | CPU_XFEATURE_YMM, 0);
                cpu_xsave = 1;
        }
#endif

        if (cpu_vendor_id == CPU_VENDOR_AMD) {
                switch((cpu_id & 0xFF0000)) {
                case 0x100000:
                case 0x120000:
                        /*
                         * Errata 721 is the cpu bug found by yours truly
                         * (Matthew Dillon).  It is a bug where a sequence
                         * of 5 or more popq's + a retq, under involved
                         * deep recursion circumstances, can cause the %rsp
                         * to not be properly updated, almost always
                         * resulting in a seg-fault soon after.
                         *
                         * Do not install the workaround when we are running
                         * in a virtual machine.
                         */
                        if (vmm_guest)
                                break;
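
                        /*
                         * Bit 0 of the DE_CFG MSR is the workaround bit
                         * documented for this erratum; set it if the BIOS
                         * has not already done so.
                         */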
                        msr = rdmsr(MSR_AMD_DE_CFG);
                        if ((msr & 1) == 0) {
                                if (cpu == 0)
                                        kprintf("Errata 721 workaround "
                                                "installed\n");
                                msr |= 1;
                                wrmsr(MSR_AMD_DE_CFG, msr);
                        }
                        break;
                }

                /*
                 * Work around Erratum 793: Specific Combination of Writes
                 * to Write Combined Memory Types and Locked Instructions
                 * May Cause Core Hang.  See Revision Guide for AMD Family
                 * 16h Models 00h-0Fh Processors, revision 3.04 or later,
                 * publication 51810.
                 */
                if (CPUID_TO_FAMILY(cpu_id) == 0x16 &&
                    CPUID_TO_MODEL(cpu_id) <= 0xf) {
                        if ((cpu_feature2 & CPUID2_VMM) == 0) {
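                                /*
                                 * Per the revision guide, the workaround is
                                 * to set bit 15 of MSR C001_1020.
                                 */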
                                msr = rdmsr(0xc0011020);
                                msr |= (uint64_t)1 << 15;
                                wrmsr(0xc0011020, msr);
                        }
                }
        }
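
        /*
         * Enable no-execute (NX) page protection when the CPU advertises
         * it; EFER.NXE makes the PG_NX page-table bit available.
         */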
        if ((amd_feature & AMDID_NX) != 0) {
                msr = rdmsr(MSR_EFER) | EFER_NXE;
                wrmsr(MSR_EFER, msr);
#if 0 /* JG */
                pg_nx = PG_NX;
#endif
        }

        if (cpu_vendor_id == CPU_VENDOR_CENTAUR &&
            CPUID_TO_FAMILY(cpu_id) == 0x6 &&
            CPUID_TO_MODEL(cpu_id) >= 0xf)
                init_via();

        TUNABLE_INT_FETCH("hw.clflush_enable", &hw_clflush_enable);
        if (cpu_feature & CPUID_CLFSH) {
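                /*
                 * CPUID leaf 1 EBX bits 15:8 (cpu_procinfo) report the
                 * CLFLUSH line size in 8-byte units.
                 */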
                cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;

                if (hw_clflush_enable == 0 ||
                    ((hw_clflush_enable == -1) && vmm_guest))
                        cpu_feature &= ~CPUID_CLFSH;
        }
}