kernel - Implement support for SMAP and SMEP security
[dragonfly.git] sys/platform/pc64/x86_64/initcpu.c
/*-
 * Copyright (c) KATO Takenori, 1997, 1998.
 * Copyright (c) 2008 The DragonFly Project.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

extern int i8254_cputimer_disable;

static int tsc_ignore_cpuid = 0;
TUNABLE_INT("hw.tsc_ignore_cpuid", &tsc_ignore_cpuid);
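
/*
 * (Usage note, illustrative) TUNABLE_INT values are fetched from the
 * boot environment, so this knob can be set from /boot/loader.conf:
 *
 *      hw.tsc_ignore_cpuid=1   # report the CPUID value but do not use it
 */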

static int hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");

int     cpu_type;               /* XXX CPU_CLAWHAMMER */
u_int   cpu_feature;            /* Feature flags */
u_int   cpu_feature2;           /* Feature flags */
u_int   amd_feature;            /* AMD feature flags */
u_int   amd_feature2;           /* AMD feature flags */
u_int   via_feature_rng;        /* VIA RNG features */
u_int   via_feature_xcrypt;     /* VIA ACE features */
u_int   cpu_high;               /* Highest arg to CPUID */
u_int   cpu_exthigh;            /* Highest arg to extended CPUID */
u_int   cpu_id;                 /* Stepping ID */
u_int   cpu_procinfo;           /* HyperThreading Info / Brand Index / CLFLUSH */
u_int   cpu_procinfo2;          /* Multicore info */
char    cpu_vendor[20];         /* CPU Origin code */
u_int   cpu_vendor_id;          /* CPU vendor ID */
u_int   cpu_fxsr;               /* SSE enabled */
u_int   cpu_xsave;              /* AVX enabled by OS */
u_int   cpu_mxcsr_mask;         /* Valid bits in mxcsr */
u_int   cpu_clflush_line_size = 32;     /* Default CLFLUSH line size */
u_int   cpu_stdext_feature;
u_int   cpu_thermal_feature;
u_int   cpu_mwait_feature;
u_int   cpu_mwait_extemu;

/*
 * -1: automatic (enable on h/w, disable on VMs)
 *  0: disable
 *  1: enable (where available)
 */
static int hw_clflush_enable = -1;

SYSCTL_INT(_hw, OID_AUTO, clflush_enable, CTLFLAG_RD, &hw_clflush_enable, 0,
    "CLFLUSH enable (-1: automatic, 0: disable, 1: enable)");

SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
    &via_feature_rng, 0, "VIA C3/C7 RNG feature available in CPU");
SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
    &via_feature_xcrypt, 0, "VIA C3/C7 xcrypt feature available in CPU");

/*
 * Initialize special VIA C3/C7 features
 */
static void
init_via(void)
{
        u_int regs[4], val;
        u_int64_t msreg;

        do_cpuid(0xc0000000, regs);
        val = regs[0];
        if (val >= 0xc0000001) {
                do_cpuid(0xc0000001, regs);
                val = regs[3];
        } else
                val = 0;

        /* Enable RNG if present and disabled */
        if (val & VIA_CPUID_HAS_RNG) {
                if (!(val & VIA_CPUID_DO_RNG)) {
                        msreg = rdmsr(0x110B);
                        msreg |= 0x40;
                        wrmsr(0x110B, msreg);
                }
                via_feature_rng = VIA_HAS_RNG;
        }

        /* Enable AES engine if present and disabled */
        if (val & VIA_CPUID_HAS_ACE) {
                if (!(val & VIA_CPUID_DO_ACE)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_AES;
        }

        /* Enable ACE2 engine if present and disabled */
        if (val & VIA_CPUID_HAS_ACE2) {
                if (!(val & VIA_CPUID_DO_ACE2)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_AESCTR;
        }

        /* Enable SHA engine if present and disabled */
        if (val & VIA_CPUID_HAS_PHE) {
                if (!(val & VIA_CPUID_DO_PHE)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_SHA;
        }

        /* Enable MM engine if present and disabled */
        if (val & VIA_CPUID_HAS_PMM) {
                if (!(val & VIA_CPUID_DO_PMM)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_MM;
        }
}
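
/*
 * Note on the MSRs used above (informational; the register names are an
 * assumption based on VIA PadLock programming documentation): 0x110B is
 * the RNG configuration MSR, where bit 6 (0x40) enables the RNG, and
 * 0x1107 is the Feature Control Register, where bit 28 exposes the
 * remaining PadLock units (ACE/ACE2/PHE/PMM) through CPUID 0xc0000001.
 */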

static enum vmm_guest_type
detect_vmm(void)
{
        enum vmm_guest_type guest;
        char vendor[16];

        /*
         * [RFC] CPUID usage for interaction between Hypervisors and Linux.
         * http://lkml.org/lkml/2008/10/1/246
         *
         * KB1009458: Mechanisms to determine if software is running in
         * a VMware virtual machine
         * http://kb.vmware.com/kb/1009458
         */
        if (cpu_feature2 & CPUID2_VMM) {
                u_int regs[4];

                do_cpuid(0x40000000, regs);
                ((u_int *)&vendor)[0] = regs[1];
                ((u_int *)&vendor)[1] = regs[2];
                ((u_int *)&vendor)[2] = regs[3];
                vendor[12] = '\0';
                if (regs[0] >= 0x40000000) {
                        memcpy(vmm_vendor, vendor, 13);
                        if (strcmp(vmm_vendor, "VMwareVMware") == 0)
                                return VMM_GUEST_VMWARE;
                        else if (strcmp(vmm_vendor, "Microsoft Hv") == 0)
                                return VMM_GUEST_HYPERV;
                        else if (strcmp(vmm_vendor, "KVMKVMKVM") == 0)
                                return VMM_GUEST_KVM;
                } else if (regs[0] == 0) {
                        /* Also detect old KVM versions with regs[0] == 0 */
                        if (strcmp(vendor, "KVMKVMKVM") == 0) {
                                memcpy(vmm_vendor, vendor, 13);
                                return VMM_GUEST_KVM;
                        }
                }
        }

        guest = detect_virtual();
        if (guest == VMM_GUEST_NONE && (cpu_feature2 & CPUID2_VMM))
                guest = VMM_GUEST_UNKNOWN;
        return guest;
}
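
/*
 * Illustrative layout of the hypervisor CPUID leaf consumed above
 * (shown for a little-endian x86_64 guest):
 *
 *      do_cpuid(0x40000000, regs);
 *      regs[0] (EAX) = highest hypervisor leaf, e.g. 0x40000001
 *      regs[1..3] (EBX, ECX, EDX) = 12-byte vendor signature, so a
 *      KVM guest returns "KVMK", "VMKV", "M\0\0\0" -> "KVMKVMKVM".
 */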

/*
 * Initialize CPU control registers
 */
void
initializecpu(int cpu)
{
        uint64_t msr;

        /*
         * Check for FXSR and SSE support and enable if available
         */
        if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
                load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
                cpu_fxsr = hw_instruction_sse = 1;
        }

        if (cpu == 0) {
                /* Check if we are running in a hypervisor. */
                vmm_guest = detect_vmm();
        }

#if !defined(CPU_DISABLE_AVX)
        /* Check for XSAVE and AVX support and enable if available. */
        if ((cpu_feature2 & CPUID2_AVX) && (cpu_feature2 & CPUID2_XSAVE) &&
            (cpu_feature & CPUID_SSE)) {
                load_cr4(rcr4() | CR4_XSAVE);

                /* Adjust size of savefpu in npx.h before adding to mask. */
                xsetbv(0, CPU_XFEATURE_X87 | CPU_XFEATURE_SSE |
                    CPU_XFEATURE_YMM, 0);
                cpu_xsave = 1;
        }
#endif
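
        /*
         * Background on the xsetbv() call above (informational; the
         * CPU_XFEATURE_* values are assumed to mirror the architectural
         * XCR0 bits): XCR0 bit 0 (x87) must always be set, and enabling
         * AVX requires both bit 1 (SSE/XMM) and bit 2 (YMM), so the mask
         * written here is 0x7.  The second and third arguments supply
         * the low and high 32 bits of the 64-bit XCR0 value.
         */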

        if (cpu_vendor_id == CPU_VENDOR_AMD) {
                switch((cpu_id & 0xFF0000)) {
                case 0x100000:
                case 0x120000:
                        /*
                         * Errata 721 is the cpu bug found by yours truly
                         * (Matthew Dillon).  It is a bug where a sequence
                         * of 5 or more popq's + a retq, under involved
                         * deep recursion circumstances, can cause the %rsp
                         * to not be properly updated, almost always
                         * resulting in a seg-fault soon after.
                         *
                         * Do not install the workaround when we are running
                         * in a virtual machine.
                         */
                        if (vmm_guest)
                                break;

                        msr = rdmsr(MSR_AMD_DE_CFG);
                        if ((msr & 1) == 0) {
                                if (cpu == 0)
                                        kprintf("Errata 721 workaround "
                                                "installed\n");
                                msr |= 1;
                                wrmsr(MSR_AMD_DE_CFG, msr);
                        }
                        break;
                }
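
                /*
                 * (Informational) MSR_AMD_DE_CFG is AMD's decode
                 * configuration register (0xc0011029 on these families;
                 * treat the exact address as an assumption).  Bit 0 is
                 * the chicken bit AMD documents for erratum 721, which
                 * is why it is only written when still clear above.
                 */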

                /*
                 * BIOS may fail to set InitApicIdCpuIdLo to 1 as it should
                 * per BKDG.  So, do it here or otherwise some tools could
                 * be confused by Initial Local APIC ID reported with
                 * CPUID Function 1 in EBX.
                 */
                if (CPUID_TO_FAMILY(cpu_id) == 0x10) {
                        if ((cpu_feature2 & CPUID2_VMM) == 0) {
                                msr = rdmsr(0xc001001f);
                                msr |= (uint64_t)1 << 54;
                                wrmsr(0xc001001f, msr);
                        }
                }

                /*
                 * BIOS may configure Family 10h processors to convert
                 * WC+ cache type to CD.  That can hurt performance of
                 * guest VMs using nested paging.
                 *
                 * The relevant MSR bit is not documented in the BKDG,
                 * the fix is borrowed from Linux.
                 */
                if (CPUID_TO_FAMILY(cpu_id) == 0x10) {
                        if ((cpu_feature2 & CPUID2_VMM) == 0) {
                                msr = rdmsr(0xc001102a);
                                msr &= ~((uint64_t)1 << 24);
                                wrmsr(0xc001102a, msr);
                        }
                }

                /*
                 * Work around Erratum 793: Specific Combination of Writes
                 * to Write Combined Memory Types and Locked Instructions
                 * May Cause Core Hang.  See Revision Guide for AMD Family
                 * 16h Models 00h-0Fh Processors, revision 3.04 or later,
                 * publication 51810.
                 */
                if (CPUID_TO_FAMILY(cpu_id) == 0x16 &&
                    CPUID_TO_MODEL(cpu_id) <= 0xf) {
                        if ((cpu_feature2 & CPUID2_VMM) == 0) {
                                msr = rdmsr(0xc0011020);
                                msr |= (uint64_t)1 << 15;
                                wrmsr(0xc0011020, msr);
                        }
                }
        }

        if ((amd_feature & AMDID_NX) != 0) {
                msr = rdmsr(MSR_EFER) | EFER_NXE;
                wrmsr(MSR_EFER, msr);
#if 0 /* JG */
                pg_nx = PG_NX;
#endif
        }

        if (cpu_vendor_id == CPU_VENDOR_CENTAUR &&
            CPUID_TO_FAMILY(cpu_id) == 0x6 &&
            CPUID_TO_MODEL(cpu_id) >= 0xf)
                init_via();

        TUNABLE_INT_FETCH("hw.clflush_enable", &hw_clflush_enable);
        if (cpu_feature & CPUID_CLFSH) {
                /*
                 * CPUID.1 EBX[15:8] reports the CLFLUSH line size in
                 * 8-byte units, e.g. a value of 8 means 64 bytes.
                 */
                cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;

                if (hw_clflush_enable == 0 ||
                    ((hw_clflush_enable == -1) && vmm_guest))
                        cpu_feature &= ~CPUID_CLFSH;
        }

        /* Set TSC_AUX register to the cpuid, for using rdtscp in userland. */
        if ((amd_feature & AMDID_RDTSCP) != 0)
                wrmsr(MSR_TSCAUX, cpu);
}
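
/*
 * Userland sketch (illustrative only, not part of the kernel build):
 * with TSC_AUX seeded with the cpu number above, rdtscp returns that
 * number in %ecx alongside the timestamp:
 *
 *      unsigned int aux;
 *      unsigned long long tsc = __builtin_ia32_rdtscp(&aux);
 *      // aux now holds the id of the cpu the read executed on
 */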

/*
 * This method should be at least as good as calibrating the TSC based on the
 * HPET timer, since the HPET runs with the core crystal clock apparently.
 */
static void
detect_tsc_frequency(void)
{
        int cpu_family, cpu_model;
        u_int regs[4];
        uint64_t crystal = 0;

        cpu_model = CPUID_TO_MODEL(cpu_id);
        cpu_family = CPUID_TO_FAMILY(cpu_id);

        if (cpu_vendor_id != CPU_VENDOR_INTEL)
                return;

        if (cpu_high < 0x15)
                return;

        /* CPUID 0x15: EBX/EAX = TSC/crystal clock ratio, ECX = crystal Hz */
        do_cpuid(0x15, regs);
        if (regs[0] == 0 || regs[1] == 0)
                return;

        if (regs[2] == 0) {
                /* For some families the SDM contains the core crystal clock. */
                if (cpu_family == 0x6) {
                        switch (cpu_model) {
                        case 0x55:      /* Xeon Scalable */
                                crystal = 25000000;     /* 25 MHz */
                                break;
                        /* Skylake */
                        case 0x4e:
                        case 0x5e:
                        /* Kabylake/Coffeelake */
                        case 0x8e:
                        case 0x9e:
                                crystal = 24000000;     /* 24 MHz */
                                break;
                        case 0x5c:      /* Goldmont Atom */
                                crystal = 19200000;     /* 19.2 MHz */
                                break;
                        default:
                                break;
                        }
                }
        } else {
                crystal = regs[2];
        }

        if (crystal == 0)
                return;

        kprintf("TSC crystal clock: %ju Hz, TSC/crystal ratio: %u/%u\n",
            (uintmax_t)crystal, regs[1], regs[0]);
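
        /*
         * Worked example (illustrative numbers): a part reporting a
         * ratio of EBX/EAX = 216/2 with a 24 MHz crystal yields
         * tsc_frequency = (24000000 * 216) / 2 = 2592000000 Hz.
         */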

        if (tsc_ignore_cpuid == 0) {
                tsc_frequency = (crystal * regs[1]) / regs[0];
                i8254_cputimer_disable = 1;
        }
}

TIMECOUNTER_INIT(cpuid_tsc_frequency, detect_tsc_frequency);