[qemu.git] / target / i386 / cpu.c
/*
 *  i386 CPUID helper functions
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "sysemu/hvf.h"
#include "sysemu/cpus.h"
#include "kvm_i386.h"
#include "sev_i386.h"

#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-misc.h"
#include "qapi/qapi-visit-run-state.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qerror.h"
#include "qapi/visitor.h"
#include "qom/qom-qobject.h"
#include "sysemu/arch_init.h"

#if defined(CONFIG_KVM)
#include <linux/kvm_para.h>
#endif

#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
#include "hw/i386/topology.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/hw.h"
#include "hw/xen/xen.h"
#include "hw/i386/apic_internal.h"
#endif

#include "disas/capstone.h"
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d


/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

#define CPUID_4_LEVEL(l)        ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
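/*
 * Example (illustrative): AMD_ENC_ASSOC(16) evaluates to 0x8, the value that
 * CPUID leaf 0x80000006 uses to report a 16-way set-associative cache; any
 * associativity not listed above encodes as 0 (invalid).
 */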
/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* Level 3 unified cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
#define L3_N_LINE_SIZE        64
#define L3_N_ASSOCIATIVITY    16
#define L3_N_SETS          16384
#define L3_N_PARTITIONS        1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG     1
#define L3_N_SIZE_KB_AMD   16384
/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;
    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}
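/*
 * Illustrative note: the three words are the CPUID leaf 0 registers in
 * EBX, EDX, ECX order (see host_vendor_fms() below); on an Intel host they
 * decode to the 12-character string "GenuineIntel".
 */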
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, "kvm-pv-tlb-flush", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_KVM_HINTS] = {
        .feat_names = {
            "kvm-hint-dedicated", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean",  "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", "intel-pt", "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, "avx512vbmi2", NULL,
            "gfni", "vaes", "vpclmulqdq", "avx512vnni",
            "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "spec-ctrl", NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_8000_0008_EBX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "ibpb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000008,
        .cpuid_reg = R_EBX,
        .tcg_features = 0,
        .unmigratable_flags = 0,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};
typedef struct X86RegisterInfo32 {
    /* Name of register  */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
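/*
 * Descriptive note: each entry above ties an XSAVE state component bit to the
 * CPUID feature that enables it and to the component's offset/size inside
 * X86XSaveArea; xsave_area_size() below derives the required save-area size
 * for a given component mask from these values.
 */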
static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    return ret;
}
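/*
 * Example (illustrative): for mask = XSTATE_FP_MASK | XSTATE_SSE_MASK both
 * selected components live in the legacy region at offset 0, so the result is
 * simply sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader).
 */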
static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}

static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}

const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}
/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;
    int i;

    for (i = 0; i < 32; i++) {
        uint32_t f = 1U << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
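/*
 * Usage note (illustrative): host_cpuid(0, 0, &eax, &ebx, &ecx, &edx) returns
 * the highest supported basic leaf in EAX and the vendor string spread across
 * EBX/EDX/ECX, which is exactly how host_vendor_fms() below uses it.
 */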
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}
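/*
 * Example (illustrative): with CPUID.1.EAX = 0x000306C3 the base family is 6
 * and the extended family 0, and the extended model nibble 0x3 is combined
 * above the base model 0xC, giving family 6, model 0x3C (60), stepping 3;
 * that is the same family/model used by the "Haswell" definition below.
 */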
/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;

    if (cpu_model == NULL) {
        return NULL;
    }

    typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}
static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}
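/*
 * Illustrative round trip (assuming a 64-bit target, where X86_CPU_TYPE_SUFFIX
 * expands to "-x86_64-cpu"): x86_cpu_type_name("Haswell") yields
 * "Haswell-x86_64-cpu", and this helper strips the suffix back off.
 */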
struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    const char *model_id;
};
static X86CPUDefinition builtin_x86_defs[] = {
    {
        .name = "qemu64",
        .level = 0xd,
        .vendor = CPUID_VENDOR_AMD,
        .family = 6,
        .model = 6,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
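    /*
     * Note (context, not stated in this file): "qemu64" has historically been
     * the model used when no -cpu option is given on 64-bit system emulation;
     * "qemu32" further down plays the same role for 32-bit guests.
     */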
    {
        .name = "phenom",
        .level = 5,
        .vendor = CPUID_VENDOR_AMD,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
            CPUID_EXT_POPCNT,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        /* Missing: CPUID_SVM_LBRV */
        .features[FEAT_SVM] =
            CPUID_SVM_NPT,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
        /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
         * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
            CPUID_EXT_CX16,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
    {
        .name = "kvm64",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 15,
        .model = 6,
        .stepping = 1,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
           CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
           CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
           CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .features[FEAT_8000_0001_ECX] =
            0,
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"
    },
    {
        .name = "qemu32",
        .level = 4,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 6,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3,
        .xlevel = 0x80000004,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
    {
        .name = "kvm32",
        .level = 5,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 15,
        .model = 6,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_ECX] =
            0,
        .xlevel = 0x80000008,
        .model_id = "Common 32-bit KVM processor"
    },
    {
        .name = "coreduo",
        .level = 10,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
            CPUID_SS,
        /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
         * CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 1,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 4,
        .model = 8,
        .stepping = 0,
        .features[FEAT_1_EDX] =
            I486_FEATURES,
        .xlevel = 0,
        .model_id = "",
    },
    {
        .name = "pentium",
        .level = 1,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            PENTIUM_FEATURES,
        .xlevel = 0,
        .model_id = "",
    },
    {
        .name = "pentium2",
        .level = 2,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            PENTIUM2_FEATURES,
        .xlevel = 0,
        .model_id = "",
    },
    {
        .name = "pentium3",
        .level = 3,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            PENTIUM3_FEATURES,
        .xlevel = 0,
        .model_id = "",
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor = CPUID_VENDOR_AMD,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
            CPUID_MCA,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
    {
        .name = "n270",
        .level = 10,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 28,
        .stepping = 2,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
            CPUID_ACPI | CPUID_SS,
            /* Some CPUs got no CPUID_SEP */
        /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
         * CPUID_EXT_XTPR */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
            CPUID_EXT_MOVBE,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
    {
        .name = "Conroe",
        .level = 10,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 15,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
    },
    {
        .name = "Penryn",
        .level = 10,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 23,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
    },
    {
        .name = "Nehalem",
        .level = 11,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 26,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
    },
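    /*
     * The "-IBRS" variants in this table (starting with Nehalem-IBRS below)
     * differ from their base models only by adding CPUID_7_0_EDX_SPEC_CTRL
     * (and a matching model_id string), i.e. they additionally advertise the
     * IBRS/IBPB speculation-control interface introduced for the Spectre
     * mitigations.
     */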
    {
        .name = "Nehalem-IBRS",
        .level = 11,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 26,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
    },
    {
        .name = "Westmere",
        .level = 11,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 44,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
    },
    {
        .name = "Westmere-IBRS",
        .level = 11,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 44,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
    },
    {
        .name = "SandyBridge",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 42,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E312xx (Sandy Bridge)",
    },
    {
        .name = "SandyBridge-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 42,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
    },
    {
        .name = "IvyBridge",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 58,
        .stepping = 9,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_ERMS,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
    },
    {
        .name = "IvyBridge-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 58,
        .stepping = 9,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_ERMS,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
    },
    {
        .name = "Haswell-noTSX",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, no TSX)",
    },
    {
        .name = "Haswell-noTSX-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
    },
    {
        .name = "Haswell",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 4,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell)",
    },
    {
        .name = "Haswell-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 4,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, IBRS)",
    },
    {
        .name = "Broadwell-noTSX",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, no TSX)",
    },
    {
        .name = "Broadwell-noTSX-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
    },
    {
        .name = "Broadwell",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell)",
    },
    {
        .name = "Broadwell-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, IBRS)",
    },
    {
        .name = "Skylake-Client",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 94,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Skylake)",
    },
    {
        .name = "Skylake-Client-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 94,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Skylake, IBRS)",
    },
1741 .name = "Skylake-Server",
1742 .level = 0xd,
1743 .vendor = CPUID_VENDOR_INTEL,
1744 .family = 6,
1745 .model = 85,
1746 .stepping = 4,
1747 .features[FEAT_1_EDX] =
1748 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1749 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1750 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1751 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1752 CPUID_DE | CPUID_FP87,
1753 .features[FEAT_1_ECX] =
1754 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1755 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1756 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1757 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1758 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1759 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1760 .features[FEAT_8000_0001_EDX] =
1761 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1762 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1763 .features[FEAT_8000_0001_ECX] =
1764 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1765 .features[FEAT_7_0_EBX] =
1766 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1767 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1768 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1769 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1770 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1771 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1772 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1773 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
1774 /* Missing: XSAVES (not supported by some Linux versions,
1775 * including v4.1 to v4.12).
1776 * KVM doesn't yet expose any XSAVES state save component,
1777 * and the only one defined in Skylake (processor tracing)
1778 * probably will block migration anyway.
1780 .features[FEAT_XSAVE] =
1781 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1782 CPUID_XSAVE_XGETBV1,
1783 .features[FEAT_6_EAX] =
1784 CPUID_6_EAX_ARAT,
1785 .xlevel = 0x80000008,
1786 .model_id = "Intel Xeon Processor (Skylake)",
1789 .name = "Skylake-Server-IBRS",
1790 .level = 0xd,
1791 .vendor = CPUID_VENDOR_INTEL,
1792 .family = 6,
1793 .model = 85,
1794 .stepping = 4,
1795 .features[FEAT_1_EDX] =
1796 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1797 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1798 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1799 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1800 CPUID_DE | CPUID_FP87,
1801 .features[FEAT_1_ECX] =
1802 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1803 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1804 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1805 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1806 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1807 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1808 .features[FEAT_8000_0001_EDX] =
1809 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1810 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1811 .features[FEAT_8000_0001_ECX] =
1812 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1813 .features[FEAT_7_0_EDX] =
1814 CPUID_7_0_EDX_SPEC_CTRL,
1815 .features[FEAT_7_0_EBX] =
1816 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1817 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1818 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1819 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1820 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1821 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1822 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1823 CPUID_7_0_EBX_AVX512VL,
1824 /* Missing: XSAVES (not supported by some Linux versions,
1825 * including v4.1 to v4.12).
1826 * KVM doesn't yet expose any XSAVES state save component,
1827 * and the only one defined in Skylake (processor tracing)
1828 * probably will block migration anyway.
1830 .features[FEAT_XSAVE] =
1831 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1832 CPUID_XSAVE_XGETBV1,
1833 .features[FEAT_6_EAX] =
1834 CPUID_6_EAX_ARAT,
1835 .xlevel = 0x80000008,
1836 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
1839 .name = "Opteron_G1",
1840 .level = 5,
1841 .vendor = CPUID_VENDOR_AMD,
1842 .family = 15,
1843 .model = 6,
1844 .stepping = 1,
1845 .features[FEAT_1_EDX] =
1846 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1847 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1848 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1849 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1850 CPUID_DE | CPUID_FP87,
1851 .features[FEAT_1_ECX] =
1852 CPUID_EXT_SSE3,
1853 .features[FEAT_8000_0001_EDX] =
1854 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1855 .xlevel = 0x80000008,
1856 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1859 .name = "Opteron_G2",
1860 .level = 5,
1861 .vendor = CPUID_VENDOR_AMD,
1862 .family = 15,
1863 .model = 6,
1864 .stepping = 1,
1865 .features[FEAT_1_EDX] =
1866 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1867 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1868 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1869 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1870 CPUID_DE | CPUID_FP87,
1871 .features[FEAT_1_ECX] =
1872 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1873 /* Missing: CPUID_EXT2_RDTSCP */
1874 .features[FEAT_8000_0001_EDX] =
1875 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1876 .features[FEAT_8000_0001_ECX] =
1877 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1878 .xlevel = 0x80000008,
1879 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1882 .name = "Opteron_G3",
1883 .level = 5,
1884 .vendor = CPUID_VENDOR_AMD,
1885 .family = 16,
1886 .model = 2,
1887 .stepping = 3,
1888 .features[FEAT_1_EDX] =
1889 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1890 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1891 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1892 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1893 CPUID_DE | CPUID_FP87,
1894 .features[FEAT_1_ECX] =
1895 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1896 CPUID_EXT_SSE3,
1897 /* Missing: CPUID_EXT2_RDTSCP */
1898 .features[FEAT_8000_0001_EDX] =
1899 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1900 .features[FEAT_8000_0001_ECX] =
1901 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1902 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1903 .xlevel = 0x80000008,
1904 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1907 .name = "Opteron_G4",
1908 .level = 0xd,
1909 .vendor = CPUID_VENDOR_AMD,
1910 .family = 21,
1911 .model = 1,
1912 .stepping = 2,
1913 .features[FEAT_1_EDX] =
1914 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1915 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1916 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1917 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1918 CPUID_DE | CPUID_FP87,
1919 .features[FEAT_1_ECX] =
1920 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1921 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1922 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1923 CPUID_EXT_SSE3,
1924 /* Missing: CPUID_EXT2_RDTSCP */
1925 .features[FEAT_8000_0001_EDX] =
1926 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1927 CPUID_EXT2_SYSCALL,
1928 .features[FEAT_8000_0001_ECX] =
1929 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1930 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1931 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1932 CPUID_EXT3_LAHF_LM,
1933 /* no xsaveopt! */
1934 .xlevel = 0x8000001A,
1935 .model_id = "AMD Opteron 62xx class CPU",
1938 .name = "Opteron_G5",
1939 .level = 0xd,
1940 .vendor = CPUID_VENDOR_AMD,
1941 .family = 21,
1942 .model = 2,
1943 .stepping = 0,
1944 .features[FEAT_1_EDX] =
1945 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1946 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1947 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1948 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1949 CPUID_DE | CPUID_FP87,
1950 .features[FEAT_1_ECX] =
1951 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1952 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1953 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1954 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1955 /* Missing: CPUID_EXT2_RDTSCP */
1956 .features[FEAT_8000_0001_EDX] =
1957 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1958 CPUID_EXT2_SYSCALL,
1959 .features[FEAT_8000_0001_ECX] =
1960 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1961 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1962 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1963 CPUID_EXT3_LAHF_LM,
1964 /* no xsaveopt! */
1965 .xlevel = 0x8000001A,
1966 .model_id = "AMD Opteron 63xx class CPU",
1969 .name = "EPYC",
1970 .level = 0xd,
1971 .vendor = CPUID_VENDOR_AMD,
1972 .family = 23,
1973 .model = 1,
1974 .stepping = 2,
1975 .features[FEAT_1_EDX] =
1976 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
1977 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
1978 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
1979 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
1980 CPUID_VME | CPUID_FP87,
1981 .features[FEAT_1_ECX] =
1982 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
1983 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
1984 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1985 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
1986 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1987 .features[FEAT_8000_0001_EDX] =
1988 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
1989 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
1990 CPUID_EXT2_SYSCALL,
1991 .features[FEAT_8000_0001_ECX] =
1992 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
1993 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
1994 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1995 .features[FEAT_7_0_EBX] =
1996 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
1997 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
1998 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
1999 CPUID_7_0_EBX_SHA_NI,
2000 /* Missing: XSAVES (not supported by some Linux versions,
2001 * including v4.1 to v4.12).
2002 * KVM doesn't yet expose any XSAVES state save component.
2004 .features[FEAT_XSAVE] =
2005 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2006 CPUID_XSAVE_XGETBV1,
2007 .features[FEAT_6_EAX] =
2008 CPUID_6_EAX_ARAT,
2009 .xlevel = 0x8000000A,
2010 .model_id = "AMD EPYC Processor",
2013 .name = "EPYC-IBPB",
2014 .level = 0xd,
2015 .vendor = CPUID_VENDOR_AMD,
2016 .family = 23,
2017 .model = 1,
2018 .stepping = 2,
2019 .features[FEAT_1_EDX] =
2020 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2021 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2022 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2023 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2024 CPUID_VME | CPUID_FP87,
2025 .features[FEAT_1_ECX] =
2026 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2027 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2028 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2029 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2030 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2031 .features[FEAT_8000_0001_EDX] =
2032 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2033 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2034 CPUID_EXT2_SYSCALL,
2035 .features[FEAT_8000_0001_ECX] =
2036 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2037 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2038 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2039 .features[FEAT_8000_0008_EBX] =
2040 CPUID_8000_0008_EBX_IBPB,
2041 .features[FEAT_7_0_EBX] =
2042 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2043 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2044 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2045 CPUID_7_0_EBX_SHA_NI,
2046 /* Missing: XSAVES (not supported by some Linux versions,
2047 * including v4.1 to v4.12).
2048 * KVM doesn't yet expose any XSAVES state save component.
2050 .features[FEAT_XSAVE] =
2051 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2052 CPUID_XSAVE_XGETBV1,
2053 .features[FEAT_6_EAX] =
2054 CPUID_6_EAX_ARAT,
2055 .xlevel = 0x8000000A,
2056 .model_id = "AMD EPYC Processor (with IBPB)",
2060 typedef struct PropValue {
2061 const char *prop, *value;
2062 } PropValue;
2064 /* KVM-specific features that are automatically added/removed
2065 * from all CPU models when KVM is enabled.
2067 static PropValue kvm_default_props[] = {
2068 { "kvmclock", "on" },
2069 { "kvm-nopiodelay", "on" },
2070 { "kvm-asyncpf", "on" },
2071 { "kvm-steal-time", "on" },
2072 { "kvm-pv-eoi", "on" },
2073 { "kvmclock-stable-bit", "on" },
2074 { "x2apic", "on" },
2075 { "acpi", "off" },
2076 { "monitor", "off" },
2077 { "svm", "off" },
2078 { NULL, NULL },
2081 /* TCG-specific defaults that override all CPU models when using TCG
2083 static PropValue tcg_default_props[] = {
2084 { "vme", "off" },
2085 { NULL, NULL },
2089 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2091 PropValue *pv;
2092 for (pv = kvm_default_props; pv->prop; pv++) {
2093 if (!strcmp(pv->prop, prop)) {
2094 pv->value = value;
2095 break;
2099 /* It is valid to call this function only for properties that
2100 * are already present in the kvm_default_props table.
2102 assert(pv->prop);
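/*
 * Illustrative usage sketch (not part of the original file): board or
 * machine-type code can flip one of the defaults listed in
 * kvm_default_props before CPUs are created, the same way this file
 * itself disables x2apic when the irqchip is not in the kernel:
 *
 *     if (!kvm_irqchip_in_kernel()) {
 *         x86_cpu_change_kvm_default("x2apic", "off");
 *     }
 *
 * Passing a property that is not in the table trips the assert above.
 */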
2105 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2106 bool migratable_only);
2108 static bool lmce_supported(void)
2110 uint64_t mce_cap = 0;
2112 #ifdef CONFIG_KVM
2113 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2114 return false;
2116 #endif
2118 return !!(mce_cap & MCG_LMCE_P);
2121 #define CPUID_MODEL_ID_SZ 48
2124 * cpu_x86_fill_model_id:
2125 * Get CPUID model ID string from host CPU.
2127 * @str should have at least CPUID_MODEL_ID_SZ bytes
2129 * The function does NOT add a null terminator to the string
2130 * automatically.
2132 static int cpu_x86_fill_model_id(char *str)
2134 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2135 int i;
2137 for (i = 0; i < 3; i++) {
2138 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2139 memcpy(str + i * 16 + 0, &eax, 4);
2140 memcpy(str + i * 16 + 4, &ebx, 4);
2141 memcpy(str + i * 16 + 8, &ecx, 4);
2142 memcpy(str + i * 16 + 12, &edx, 4);
2144 return 0;
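/*
 * Minimal usage sketch (hypothetical helper, not part of the original
 * file).  Because the function does not terminate the string itself, the
 * caller's buffer is one byte larger than CPUID_MODEL_ID_SZ and
 * zero-initialized, exactly as max_x86_cpu_initfn() does below:
 */
#if 0
static void example_fill_model_id(void)
{
    char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };

    cpu_x86_fill_model_id(model_id);
    /* model_id now holds the host CPUID.80000002h..80000004h string */
}
#endif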
2147 static Property max_x86_cpu_properties[] = {
2148 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2149 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2150 DEFINE_PROP_END_OF_LIST()
2153 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2155 DeviceClass *dc = DEVICE_CLASS(oc);
2156 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2158 xcc->ordering = 9;
2160 xcc->model_description =
2161 "Enables all features supported by the accelerator in the current host";
2163 dc->props = max_x86_cpu_properties;
2166 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2168 static void max_x86_cpu_initfn(Object *obj)
2170 X86CPU *cpu = X86_CPU(obj);
2171 CPUX86State *env = &cpu->env;
2172 KVMState *s = kvm_state;
2174 /* We can't fill the features array here because we don't know yet if
2175 * "migratable" is true or false.
2177 cpu->max_features = true;
2179 if (accel_uses_host_cpuid()) {
2180 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2181 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2182 int family, model, stepping;
2183 X86CPUDefinition host_cpudef = { };
2184 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2186 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2187 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2189 host_vendor_fms(vendor, &family, &model, &stepping);
2191 cpu_x86_fill_model_id(model_id);
2193 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2194 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2195 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2196 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2197 &error_abort);
2198 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2199 &error_abort);
2201 if (kvm_enabled()) {
2202 env->cpuid_min_level =
2203 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2204 env->cpuid_min_xlevel =
2205 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2206 env->cpuid_min_xlevel2 =
2207 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2208 } else {
2209 env->cpuid_min_level =
2210 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2211 env->cpuid_min_xlevel =
2212 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2213 env->cpuid_min_xlevel2 =
2214 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2217 if (lmce_supported()) {
2218 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2220 } else {
2221 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2222 "vendor", &error_abort);
2223 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2224 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2225 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2226 object_property_set_str(OBJECT(cpu),
2227 "QEMU TCG CPU version " QEMU_HW_VERSION,
2228 "model-id", &error_abort);
2231 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
2234 static const TypeInfo max_x86_cpu_type_info = {
2235 .name = X86_CPU_TYPE_NAME("max"),
2236 .parent = TYPE_X86_CPU,
2237 .instance_init = max_x86_cpu_initfn,
2238 .class_init = max_x86_cpu_class_init,
2241 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2242 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2244 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2246 xcc->host_cpuid_required = true;
2247 xcc->ordering = 8;
2249 if (kvm_enabled()) {
2250 xcc->model_description =
2251 "KVM processor with all supported host features ";
2252 } else if (hvf_enabled()) {
2253 xcc->model_description =
2254 "HVF processor with all supported host features ";
2258 static const TypeInfo host_x86_cpu_type_info = {
2259 .name = X86_CPU_TYPE_NAME("host"),
2260 .parent = X86_CPU_TYPE_NAME("max"),
2261 .class_init = host_x86_cpu_class_init,
2264 #endif
2266 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2268 FeatureWordInfo *f = &feature_word_info[w];
2269 int i;
2271 for (i = 0; i < 32; ++i) {
2272 if ((1UL << i) & mask) {
2273 const char *reg = get_register_name_32(f->cpuid_reg);
2274 assert(reg);
2275 warn_report("%s doesn't support requested feature: "
2276 "CPUID.%02XH:%s%s%s [bit %d]",
2277 accel_uses_host_cpuid() ? "host" : "TCG",
2278 f->cpuid_eax, reg,
2279 f->feat_names[i] ? "." : "",
2280 f->feat_names[i] ? f->feat_names[i] : "", i);
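/*
 * For example (illustrative; the exact wording depends on the feature and
 * accelerator), requesting "hle" on a host that lacks it would produce a
 * message along the lines of:
 *
 *     warning: host doesn't support requested feature:
 *              CPUID.07H:EBX.hle [bit 4]
 */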
2285 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2286 const char *name, void *opaque,
2287 Error **errp)
2289 X86CPU *cpu = X86_CPU(obj);
2290 CPUX86State *env = &cpu->env;
2291 int64_t value;
2293 value = (env->cpuid_version >> 8) & 0xf;
2294 if (value == 0xf) {
2295 value += (env->cpuid_version >> 20) & 0xff;
2297 visit_type_int(v, name, &value, errp);
2300 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2301 const char *name, void *opaque,
2302 Error **errp)
2304 X86CPU *cpu = X86_CPU(obj);
2305 CPUX86State *env = &cpu->env;
2306 const int64_t min = 0;
2307 const int64_t max = 0xff + 0xf;
2308 Error *local_err = NULL;
2309 int64_t value;
2311 visit_type_int(v, name, &value, &local_err);
2312 if (local_err) {
2313 error_propagate(errp, local_err);
2314 return;
2316 if (value < min || value > max) {
2317 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2318 name ? name : "null", value, min, max);
2319 return;
2322 env->cpuid_version &= ~0xff00f00;
2323 if (value > 0x0f) {
2324 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2325 } else {
2326 env->cpuid_version |= value << 8;
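/*
 * Worked example (not in the original source): setting family=23, as the
 * EPYC model above does, saturates the base family field at 0xf and puts
 * the remainder into the extended family field (bits 27..20):
 *
 *     env->cpuid_version |= 0xf00 | ((23 - 0x0f) << 20);
 *
 * which ORs in 0x00800f00.  The getter above then recovers 0xf + 0x8 == 23.
 */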
2330 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2331 const char *name, void *opaque,
2332 Error **errp)
2334 X86CPU *cpu = X86_CPU(obj);
2335 CPUX86State *env = &cpu->env;
2336 int64_t value;
2338 value = (env->cpuid_version >> 4) & 0xf;
2339 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2340 visit_type_int(v, name, &value, errp);
2343 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2344 const char *name, void *opaque,
2345 Error **errp)
2347 X86CPU *cpu = X86_CPU(obj);
2348 CPUX86State *env = &cpu->env;
2349 const int64_t min = 0;
2350 const int64_t max = 0xff;
2351 Error *local_err = NULL;
2352 int64_t value;
2354 visit_type_int(v, name, &value, &local_err);
2355 if (local_err) {
2356 error_propagate(errp, local_err);
2357 return;
2359 if (value < min || value > max) {
2360 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2361 name ? name : "null", value, min, max);
2362 return;
2365 env->cpuid_version &= ~0xf00f0;
2366 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2369 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2370 const char *name, void *opaque,
2371 Error **errp)
2373 X86CPU *cpu = X86_CPU(obj);
2374 CPUX86State *env = &cpu->env;
2375 int64_t value;
2377 value = env->cpuid_version & 0xf;
2378 visit_type_int(v, name, &value, errp);
2381 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2382 const char *name, void *opaque,
2383 Error **errp)
2385 X86CPU *cpu = X86_CPU(obj);
2386 CPUX86State *env = &cpu->env;
2387 const int64_t min = 0;
2388 const int64_t max = 0xf;
2389 Error *local_err = NULL;
2390 int64_t value;
2392 visit_type_int(v, name, &value, &local_err);
2393 if (local_err) {
2394 error_propagate(errp, local_err);
2395 return;
2397 if (value < min || value > max) {
2398 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2399 name ? name : "null", value, min, max);
2400 return;
2403 env->cpuid_version &= ~0xf;
2404 env->cpuid_version |= value & 0xf;
2407 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2409 X86CPU *cpu = X86_CPU(obj);
2410 CPUX86State *env = &cpu->env;
2411 char *value;
2413 value = g_malloc(CPUID_VENDOR_SZ + 1);
2414 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2415 env->cpuid_vendor3);
2416 return value;
2419 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2420 Error **errp)
2422 X86CPU *cpu = X86_CPU(obj);
2423 CPUX86State *env = &cpu->env;
2424 int i;
2426 if (strlen(value) != CPUID_VENDOR_SZ) {
2427 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2428 return;
2431 env->cpuid_vendor1 = 0;
2432 env->cpuid_vendor2 = 0;
2433 env->cpuid_vendor3 = 0;
2434 for (i = 0; i < 4; i++) {
2435 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
2436 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
2437 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
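/*
 * Worked example (well-known CPUID values, not taken from this file): the
 * 12-character string "GenuineIntel" packs little-endian into the three
 * words as
 *
 *     cpuid_vendor1 (EBX) = 0x756e6547   "Genu"
 *     cpuid_vendor2 (EDX) = 0x49656e69   "ineI"
 *     cpuid_vendor3 (ECX) = 0x6c65746e   "ntel"
 *
 * which is exactly what leaf 0 returns in cpu_x86_cpuid() below.
 */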
2441 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
2443 X86CPU *cpu = X86_CPU(obj);
2444 CPUX86State *env = &cpu->env;
2445 char *value;
2446 int i;
2448 value = g_malloc(48 + 1);
2449 for (i = 0; i < 48; i++) {
2450 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
2452 value[48] = '\0';
2453 return value;
2456 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
2457 Error **errp)
2459 X86CPU *cpu = X86_CPU(obj);
2460 CPUX86State *env = &cpu->env;
2461 int c, len, i;
2463 if (model_id == NULL) {
2464 model_id = "";
2466 len = strlen(model_id);
2467 memset(env->cpuid_model, 0, 48);
2468 for (i = 0; i < 48; i++) {
2469 if (i >= len) {
2470 c = '\0';
2471 } else {
2472 c = (uint8_t)model_id[i];
2474 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
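/*
 * Packing sketch (not part of the original file): byte i of the model-id
 * string lands in word i >> 2 of env->cpuid_model[], at byte position
 * i & 3 (e.g. character 5 ends up in cpuid_model[1], bits 15..8).  Leaves
 * 0x80000002..0x80000004 below hand the words back four at a time, so a
 * 48-character string fills all three leaves.
 */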
2478 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
2479 void *opaque, Error **errp)
2481 X86CPU *cpu = X86_CPU(obj);
2482 int64_t value;
2484 value = cpu->env.tsc_khz * 1000;
2485 visit_type_int(v, name, &value, errp);
2488 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
2489 void *opaque, Error **errp)
2491 X86CPU *cpu = X86_CPU(obj);
2492 const int64_t min = 0;
2493 const int64_t max = INT64_MAX;
2494 Error *local_err = NULL;
2495 int64_t value;
2497 visit_type_int(v, name, &value, &local_err);
2498 if (local_err) {
2499 error_propagate(errp, local_err);
2500 return;
2502 if (value < min || value > max) {
2503 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2504 name ? name : "null", value, min, max);
2505 return;
2508 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
2511 /* Generic getter for "feature-words" and "filtered-features" properties */
2512 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2513 const char *name, void *opaque,
2514 Error **errp)
2516 uint32_t *array = (uint32_t *)opaque;
2517 FeatureWord w;
2518 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
2519 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
2520 X86CPUFeatureWordInfoList *list = NULL;
2522 for (w = 0; w < FEATURE_WORDS; w++) {
2523 FeatureWordInfo *wi = &feature_word_info[w];
2524 X86CPUFeatureWordInfo *qwi = &word_infos[w];
2525 qwi->cpuid_input_eax = wi->cpuid_eax;
2526 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
2527 qwi->cpuid_input_ecx = wi->cpuid_ecx;
2528 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
2529 qwi->features = array[w];
2531 /* List will be in reverse order, but order shouldn't matter */
2532 list_entries[w].next = list;
2533 list_entries[w].value = &word_infos[w];
2534 list = &list_entries[w];
2537 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
2540 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2541 void *opaque, Error **errp)
2543 X86CPU *cpu = X86_CPU(obj);
2544 int64_t value = cpu->hyperv_spinlock_attempts;
2546 visit_type_int(v, name, &value, errp);
2549 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2550 void *opaque, Error **errp)
2552 const int64_t min = 0xFFF;
2553 const int64_t max = UINT_MAX;
2554 X86CPU *cpu = X86_CPU(obj);
2555 Error *err = NULL;
2556 int64_t value;
2558 visit_type_int(v, name, &value, &err);
2559 if (err) {
2560 error_propagate(errp, err);
2561 return;
2564 if (value < min || value > max) {
2565 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2566 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2567 object_get_typename(obj), name ? name : "null",
2568 value, min, max);
2569 return;
2571 cpu->hyperv_spinlock_attempts = value;
2574 static const PropertyInfo qdev_prop_spinlocks = {
2575 .name = "int",
2576 .get = x86_get_hv_spinlocks,
2577 .set = x86_set_hv_spinlocks,
2580 /* Convert all '_' in a feature string option name to '-', to make feature
2581 * names conform to the QOM property naming rule, which uses '-' instead of '_'.
2583 static inline void feat2prop(char *s)
2585 while ((s = strchr(s, '_'))) {
2586 *s = '-';
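/* For example, a legacy spelling such as "kvm_pv_eoi" becomes the QOM
 * property name "kvm-pv-eoi", matching the entries in kvm_default_props
 * above.
 */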
2590 /* Return the feature property name for a feature flag bit */
2591 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2593 /* XSAVE components are automatically enabled by other features,
2594 * so return the original feature name instead
2596 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2597 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2599 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2600 x86_ext_save_areas[comp].bits) {
2601 w = x86_ext_save_areas[comp].feature;
2602 bitnr = ctz32(x86_ext_save_areas[comp].bits);
2606 assert(bitnr < 32);
2607 assert(w < FEATURE_WORDS);
2608 return feature_word_info[w].feat_names[bitnr];
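/*
 * Illustrative mapping (assuming the usual x86_ext_save_areas layout
 * defined earlier in this file): FEAT_XSAVE_COMP_LO bit 2 is the AVX
 * state component, so x86_cpu_feature_name(FEAT_XSAVE_COMP_LO, 2) reports
 * the user-visible name "avx" rather than a raw XSAVE component bit.
 */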
2611 /* Compatibility hack to maintain the legacy +-feat semantics,
2612 * where +-feat overwrites any feature set by
2613 * feat=on|off even if the latter is parsed after +-feat
2614 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
2616 static GList *plus_features, *minus_features;
2618 static gint compare_string(gconstpointer a, gconstpointer b)
2620 return g_strcmp0(a, b);
2623 /* Parse "+feature,-feature,feature=foo" CPU feature string
2625 static void x86_cpu_parse_featurestr(const char *typename, char *features,
2626 Error **errp)
2628 char *featurestr; /* Single "key=value" string being parsed */
2629 static bool cpu_globals_initialized;
2630 bool ambiguous = false;
2632 if (cpu_globals_initialized) {
2633 return;
2635 cpu_globals_initialized = true;
2637 if (!features) {
2638 return;
2641 for (featurestr = strtok(features, ",");
2642 featurestr;
2643 featurestr = strtok(NULL, ",")) {
2644 const char *name;
2645 const char *val = NULL;
2646 char *eq = NULL;
2647 char num[32];
2648 GlobalProperty *prop;
2650 /* Compatibility syntax: */
2651 if (featurestr[0] == '+') {
2652 plus_features = g_list_append(plus_features,
2653 g_strdup(featurestr + 1));
2654 continue;
2655 } else if (featurestr[0] == '-') {
2656 minus_features = g_list_append(minus_features,
2657 g_strdup(featurestr + 1));
2658 continue;
2661 eq = strchr(featurestr, '=');
2662 if (eq) {
2663 *eq++ = 0;
2664 val = eq;
2665 } else {
2666 val = "on";
2669 feat2prop(featurestr);
2670 name = featurestr;
2672 if (g_list_find_custom(plus_features, name, compare_string)) {
2673 warn_report("Ambiguous CPU model string. "
2674 "Don't mix both \"+%s\" and \"%s=%s\"",
2675 name, name, val);
2676 ambiguous = true;
2678 if (g_list_find_custom(minus_features, name, compare_string)) {
2679 warn_report("Ambiguous CPU model string. "
2680 "Don't mix both \"-%s\" and \"%s=%s\"",
2681 name, name, val);
2682 ambiguous = true;
2685 /* Special case: */
2686 if (!strcmp(name, "tsc-freq")) {
2687 int ret;
2688 uint64_t tsc_freq;
2690 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
2691 if (ret < 0 || tsc_freq > INT64_MAX) {
2692 error_setg(errp, "bad numerical value %s", val);
2693 return;
2695 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2696 val = num;
2697 name = "tsc-frequency";
2700 prop = g_new0(typeof(*prop), 1);
2701 prop->driver = typename;
2702 prop->property = g_strdup(name);
2703 prop->value = g_strdup(val);
2704 prop->errp = &error_fatal;
2705 qdev_prop_register_global(prop);
2708 if (ambiguous) {
2709 warn_report("Compatibility of ambiguous CPU model "
2710 "strings won't be kept on future QEMU versions");
2714 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2715 static int x86_cpu_filter_features(X86CPU *cpu);
2717 /* Check for missing features that may prevent the CPU class from
2718 * running using the current machine and accelerator.
2720 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2721 strList **missing_feats)
2723 X86CPU *xc;
2724 FeatureWord w;
2725 Error *err = NULL;
2726 strList **next = missing_feats;
2728 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
2729 strList *new = g_new0(strList, 1);
2730 new->value = g_strdup("kvm");
2731 *missing_feats = new;
2732 return;
2735 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2737 x86_cpu_expand_features(xc, &err);
2738 if (err) {
2739 /* Errors at x86_cpu_expand_features should never happen,
2740 * but in case they do, just report the model as not
2741 * runnable at all using the "type" property.
2743 strList *new = g_new0(strList, 1);
2744 new->value = g_strdup("type");
2745 *next = new;
2746 next = &new->next;
2749 x86_cpu_filter_features(xc);
2751 for (w = 0; w < FEATURE_WORDS; w++) {
2752 uint32_t filtered = xc->filtered_features[w];
2753 int i;
2754 for (i = 0; i < 32; i++) {
2755 if (filtered & (1UL << i)) {
2756 strList *new = g_new0(strList, 1);
2757 new->value = g_strdup(x86_cpu_feature_name(w, i));
2758 *next = new;
2759 next = &new->next;
2764 object_unref(OBJECT(xc));
2767 /* Print all cpuid feature names in featureset
2769 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2771 int bit;
2772 bool first = true;
2774 for (bit = 0; bit < 32; bit++) {
2775 if (featureset[bit]) {
2776 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2777 first = false;
2782 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2783 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2785 ObjectClass *class_a = (ObjectClass *)a;
2786 ObjectClass *class_b = (ObjectClass *)b;
2787 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2788 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2789 const char *name_a, *name_b;
2791 if (cc_a->ordering != cc_b->ordering) {
2792 return cc_a->ordering - cc_b->ordering;
2793 } else {
2794 name_a = object_class_get_name(class_a);
2795 name_b = object_class_get_name(class_b);
2796 return strcmp(name_a, name_b);
2800 static GSList *get_sorted_cpu_model_list(void)
2802 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2803 list = g_slist_sort(list, x86_cpu_list_compare);
2804 return list;
2807 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2809 ObjectClass *oc = data;
2810 X86CPUClass *cc = X86_CPU_CLASS(oc);
2811 CPUListState *s = user_data;
2812 char *name = x86_cpu_class_get_model_name(cc);
2813 const char *desc = cc->model_description;
2814 if (!desc && cc->cpu_def) {
2815 desc = cc->cpu_def->model_id;
2818 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2819 name, desc);
2820 g_free(name);
2823 /* list available CPU models and flags */
2824 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2826 int i;
2827 CPUListState s = {
2828 .file = f,
2829 .cpu_fprintf = cpu_fprintf,
2831 GSList *list;
2833 (*cpu_fprintf)(f, "Available CPUs:\n");
2834 list = get_sorted_cpu_model_list();
2835 g_slist_foreach(list, x86_cpu_list_entry, &s);
2836 g_slist_free(list);
2838 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2839 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2840 FeatureWordInfo *fw = &feature_word_info[i];
2842 (*cpu_fprintf)(f, " ");
2843 listflags(f, cpu_fprintf, fw->feat_names);
2844 (*cpu_fprintf)(f, "\n");
2848 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2850 ObjectClass *oc = data;
2851 X86CPUClass *cc = X86_CPU_CLASS(oc);
2852 CpuDefinitionInfoList **cpu_list = user_data;
2853 CpuDefinitionInfoList *entry;
2854 CpuDefinitionInfo *info;
2856 info = g_malloc0(sizeof(*info));
2857 info->name = x86_cpu_class_get_model_name(cc);
2858 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2859 info->has_unavailable_features = true;
2860 info->q_typename = g_strdup(object_class_get_name(oc));
2861 info->migration_safe = cc->migration_safe;
2862 info->has_migration_safe = true;
2863 info->q_static = cc->static_model;
2865 entry = g_malloc0(sizeof(*entry));
2866 entry->value = info;
2867 entry->next = *cpu_list;
2868 *cpu_list = entry;
2871 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2873 CpuDefinitionInfoList *cpu_list = NULL;
2874 GSList *list = get_sorted_cpu_model_list();
2875 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2876 g_slist_free(list);
2877 return cpu_list;
2880 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2881 bool migratable_only)
2883 FeatureWordInfo *wi = &feature_word_info[w];
2884 uint32_t r;
2886 if (kvm_enabled()) {
2887 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2888 wi->cpuid_ecx,
2889 wi->cpuid_reg);
2890 } else if (hvf_enabled()) {
2891 r = hvf_get_supported_cpuid(wi->cpuid_eax,
2892 wi->cpuid_ecx,
2893 wi->cpuid_reg);
2894 } else if (tcg_enabled()) {
2895 r = wi->tcg_features;
2896 } else {
2897 return ~0;
2899 if (migratable_only) {
2900 r &= x86_cpu_get_migratable_flags(w);
2902 return r;
2905 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2907 FeatureWord w;
2909 for (w = 0; w < FEATURE_WORDS; w++) {
2910 report_unavailable_features(w, cpu->filtered_features[w]);
2914 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2916 PropValue *pv;
2917 for (pv = props; pv->prop; pv++) {
2918 if (!pv->value) {
2919 continue;
2921 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2922 &error_abort);
2926 /* Load data from X86CPUDefinition into a X86CPU object
2928 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2930 CPUX86State *env = &cpu->env;
2931 const char *vendor;
2932 char host_vendor[CPUID_VENDOR_SZ + 1];
2933 FeatureWord w;
2935 /*NOTE: any property set by this function should be returned by
2936 * x86_cpu_static_props(), so static expansion of
2937 * query-cpu-model-expansion is always complete.
2940 /* CPU models only set _minimum_ values for level/xlevel: */
2941 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
2942 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2944 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2945 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2946 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2947 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2948 for (w = 0; w < FEATURE_WORDS; w++) {
2949 env->features[w] = def->features[w];
2952 /* Special cases not set in the X86CPUDefinition structs: */
2953 /* TODO: in-kernel irqchip for hvf */
2954 if (kvm_enabled()) {
2955 if (!kvm_irqchip_in_kernel()) {
2956 x86_cpu_change_kvm_default("x2apic", "off");
2959 x86_cpu_apply_props(cpu, kvm_default_props);
2960 } else if (tcg_enabled()) {
2961 x86_cpu_apply_props(cpu, tcg_default_props);
2964 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2966 /* sysenter isn't supported in compatibility mode on AMD,
2967 * syscall isn't supported in compatibility mode on Intel.
2968 * Normally we advertise the actual CPU vendor, but you can
2969 * override this using the 'vendor' property if you want to use
2970 * KVM's sysenter/syscall emulation in compatibility mode and
2971 * when doing cross vendor migration
2973 vendor = def->vendor;
2974 if (accel_uses_host_cpuid()) {
2975 uint32_t ebx = 0, ecx = 0, edx = 0;
2976 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2977 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2978 vendor = host_vendor;
2981 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2985 /* Return a QDict containing keys for all properties that can be included
2986 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
2987 * must be included in the dictionary.
2989 static QDict *x86_cpu_static_props(void)
2991 FeatureWord w;
2992 int i;
2993 static const char *props[] = {
2994 "min-level",
2995 "min-xlevel",
2996 "family",
2997 "model",
2998 "stepping",
2999 "model-id",
3000 "vendor",
3001 "lmce",
3002 NULL,
3004 static QDict *d;
3006 if (d) {
3007 return d;
3010 d = qdict_new();
3011 for (i = 0; props[i]; i++) {
3012 qdict_put_null(d, props[i]);
3015 for (w = 0; w < FEATURE_WORDS; w++) {
3016 FeatureWordInfo *fi = &feature_word_info[w];
3017 int bit;
3018 for (bit = 0; bit < 32; bit++) {
3019 if (!fi->feat_names[bit]) {
3020 continue;
3022 qdict_put_null(d, fi->feat_names[bit]);
3026 return d;
3029 /* Add an entry to @props dict, with the value for property. */
3030 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3032 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3033 &error_abort);
3035 qdict_put_obj(props, prop, value);
3038 /* Convert CPU model data from X86CPU object to a property dictionary
3039 * that can recreate exactly the same CPU model.
3041 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3043 QDict *sprops = x86_cpu_static_props();
3044 const QDictEntry *e;
3046 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3047 const char *prop = qdict_entry_key(e);
3048 x86_cpu_expand_prop(cpu, props, prop);
3052 /* Convert CPU model data from X86CPU object to a property dictionary
3053 * that can recreate exactly the same CPU model, including every
3054 * writeable QOM property.
3056 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3058 ObjectPropertyIterator iter;
3059 ObjectProperty *prop;
3061 object_property_iter_init(&iter, OBJECT(cpu));
3062 while ((prop = object_property_iter_next(&iter))) {
3063 /* skip read-only or write-only properties */
3064 if (!prop->get || !prop->set) {
3065 continue;
3068 /* "hotplugged" is the only property that is configurable
3069 * on the command-line but will be set differently on CPUs
3070 * created using "-cpu ... -smp ..." and by CPUs created
3071 * on the fly by x86_cpu_from_model() for querying. Skip it.
3073 if (!strcmp(prop->name, "hotplugged")) {
3074 continue;
3076 x86_cpu_expand_prop(cpu, props, prop->name);
3080 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3082 const QDictEntry *prop;
3083 Error *err = NULL;
3085 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3086 object_property_set_qobject(obj, qdict_entry_value(prop),
3087 qdict_entry_key(prop), &err);
3088 if (err) {
3089 break;
3093 error_propagate(errp, err);
3096 /* Create X86CPU object according to model+props specification */
3097 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3099 X86CPU *xc = NULL;
3100 X86CPUClass *xcc;
3101 Error *err = NULL;
3103 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3104 if (xcc == NULL) {
3105 error_setg(&err, "CPU model '%s' not found", model);
3106 goto out;
3109 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3110 if (props) {
3111 object_apply_props(OBJECT(xc), props, &err);
3112 if (err) {
3113 goto out;
3117 x86_cpu_expand_features(xc, &err);
3118 if (err) {
3119 goto out;
3122 out:
3123 if (err) {
3124 error_propagate(errp, err);
3125 object_unref(OBJECT(xc));
3126 xc = NULL;
3128 return xc;
3131 CpuModelExpansionInfo *
3132 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3133 CpuModelInfo *model,
3134 Error **errp)
3136 X86CPU *xc = NULL;
3137 Error *err = NULL;
3138 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3139 QDict *props = NULL;
3140 const char *base_name;
3142 xc = x86_cpu_from_model(model->name,
3143 model->has_props ?
3144 qobject_to_qdict(model->props) :
3145 NULL, &err);
3146 if (err) {
3147 goto out;
3150 props = qdict_new();
3152 switch (type) {
3153 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3154 /* Static expansion will be based on "base" only */
3155 base_name = "base";
3156 x86_cpu_to_dict(xc, props);
3157 break;
3158 case CPU_MODEL_EXPANSION_TYPE_FULL:
3159 /* As we don't return every single property, full expansion needs
3160 * to keep the original model name+props, and add extra
3161 * properties on top of that.
3163 base_name = model->name;
3164 x86_cpu_to_dict_full(xc, props);
3165 break;
3166 default:
3167 error_setg(&err, "Unsupported expansion type");
3168 goto out;
3171 if (!props) {
3172 props = qdict_new();
3174 x86_cpu_to_dict(xc, props);
3176 ret->model = g_new0(CpuModelInfo, 1);
3177 ret->model->name = g_strdup(base_name);
3178 ret->model->props = QOBJECT(props);
3179 ret->model->has_props = true;
3181 out:
3182 object_unref(OBJECT(xc));
3183 if (err) {
3184 error_propagate(errp, err);
3185 qapi_free_CpuModelExpansionInfo(ret);
3186 ret = NULL;
3188 return ret;
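/*
 * Example QMP exchange (illustrative; the exact props returned depend on
 * the accelerator and model):
 *
 *   -> { "execute": "query-cpu-model-expansion",
 *        "arguments": { "type": "static",
 *                       "model": { "name": "EPYC" } } }
 *   <- { "return": { "model": { "name": "base",
 *                               "props": { "svm": true, "sha-ni": true,
 *                                          ... } } } }
 *
 * A "full" expansion keeps the original model name and adds every
 * writeable property on top instead.
 */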
3191 static gchar *x86_gdb_arch_name(CPUState *cs)
3193 #ifdef TARGET_X86_64
3194 return g_strdup("i386:x86-64");
3195 #else
3196 return g_strdup("i386");
3197 #endif
3200 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3202 X86CPUDefinition *cpudef = data;
3203 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3205 xcc->cpu_def = cpudef;
3206 xcc->migration_safe = true;
3209 static void x86_register_cpudef_type(X86CPUDefinition *def)
3211 char *typename = x86_cpu_type_name(def->name);
3212 TypeInfo ti = {
3213 .name = typename,
3214 .parent = TYPE_X86_CPU,
3215 .class_init = x86_cpu_cpudef_class_init,
3216 .class_data = def,
3219 /* AMD aliases are handled at runtime based on CPUID vendor, so
3220 * they shouldn't be set in the CPU model table.
3222 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3223 /* catch mistakes instead of silently truncating model_id when too long */
3224 assert(def->model_id && strlen(def->model_id) <= 48);
3227 type_register(&ti);
3228 g_free(typename);
3231 #if !defined(CONFIG_USER_ONLY)
3233 void cpu_clear_apic_feature(CPUX86State *env)
3235 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3238 #endif /* !CONFIG_USER_ONLY */
3240 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3241 uint32_t *eax, uint32_t *ebx,
3242 uint32_t *ecx, uint32_t *edx)
3244 X86CPU *cpu = x86_env_get_cpu(env);
3245 CPUState *cs = CPU(cpu);
3246 uint32_t pkg_offset;
3247 uint32_t limit;
3248 uint32_t signature[3];
3250 /* Calculate & apply limits for different index ranges */
3251 if (index >= 0xC0000000) {
3252 limit = env->cpuid_xlevel2;
3253 } else if (index >= 0x80000000) {
3254 limit = env->cpuid_xlevel;
3255 } else if (index >= 0x40000000) {
3256 limit = 0x40000001;
3257 } else {
3258 limit = env->cpuid_level;
3261 if (index > limit) {
3262 /* Intel documentation states that invalid EAX input will
3263 * return the same information as EAX=cpuid_level
3264 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3266 index = env->cpuid_level;
3269 switch(index) {
3270 case 0:
3271 *eax = env->cpuid_level;
3272 *ebx = env->cpuid_vendor1;
3273 *edx = env->cpuid_vendor2;
3274 *ecx = env->cpuid_vendor3;
3275 break;
3276 case 1:
3277 *eax = env->cpuid_version;
3278 *ebx = (cpu->apic_id << 24) |
3279 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3280 *ecx = env->features[FEAT_1_ECX];
3281 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3282 *ecx |= CPUID_EXT_OSXSAVE;
3284 *edx = env->features[FEAT_1_EDX];
3285 if (cs->nr_cores * cs->nr_threads > 1) {
3286 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3287 *edx |= CPUID_HT;
3289 break;
3290 case 2:
3291 /* cache info: needed for Pentium Pro compatibility */
3292 if (cpu->cache_info_passthrough) {
3293 host_cpuid(index, 0, eax, ebx, ecx, edx);
3294 break;
3296 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3297 *ebx = 0;
3298 if (!cpu->enable_l3_cache) {
3299 *ecx = 0;
3300 } else {
3301 *ecx = L3_N_DESCRIPTOR;
3303 *edx = (L1D_DESCRIPTOR << 16) | \
3304 (L1I_DESCRIPTOR << 8) | \
3305 (L2_DESCRIPTOR);
3306 break;
3307 case 4:
3308 /* cache info: needed for Core compatibility */
3309 if (cpu->cache_info_passthrough) {
3310 host_cpuid(index, count, eax, ebx, ecx, edx);
3311 *eax &= ~0xFC000000;
3312 } else {
3313 *eax = 0;
3314 switch (count) {
3315 case 0: /* L1 dcache info */
3316 *eax |= CPUID_4_TYPE_DCACHE | \
3317 CPUID_4_LEVEL(1) | \
3318 CPUID_4_SELF_INIT_LEVEL;
3319 *ebx = (L1D_LINE_SIZE - 1) | \
3320 ((L1D_PARTITIONS - 1) << 12) | \
3321 ((L1D_ASSOCIATIVITY - 1) << 22);
3322 *ecx = L1D_SETS - 1;
3323 *edx = CPUID_4_NO_INVD_SHARING;
3324 break;
3325 case 1: /* L1 icache info */
3326 *eax |= CPUID_4_TYPE_ICACHE | \
3327 CPUID_4_LEVEL(1) | \
3328 CPUID_4_SELF_INIT_LEVEL;
3329 *ebx = (L1I_LINE_SIZE - 1) | \
3330 ((L1I_PARTITIONS - 1) << 12) | \
3331 ((L1I_ASSOCIATIVITY - 1) << 22);
3332 *ecx = L1I_SETS - 1;
3333 *edx = CPUID_4_NO_INVD_SHARING;
3334 break;
3335 case 2: /* L2 cache info */
3336 *eax |= CPUID_4_TYPE_UNIFIED | \
3337 CPUID_4_LEVEL(2) | \
3338 CPUID_4_SELF_INIT_LEVEL;
3339 if (cs->nr_threads > 1) {
3340 *eax |= (cs->nr_threads - 1) << 14;
3342 *ebx = (L2_LINE_SIZE - 1) | \
3343 ((L2_PARTITIONS - 1) << 12) | \
3344 ((L2_ASSOCIATIVITY - 1) << 22);
3345 *ecx = L2_SETS - 1;
3346 *edx = CPUID_4_NO_INVD_SHARING;
3347 break;
3348 case 3: /* L3 cache info */
3349 if (!cpu->enable_l3_cache) {
3350 *eax = 0;
3351 *ebx = 0;
3352 *ecx = 0;
3353 *edx = 0;
3354 break;
3356 *eax |= CPUID_4_TYPE_UNIFIED | \
3357 CPUID_4_LEVEL(3) | \
3358 CPUID_4_SELF_INIT_LEVEL;
3359 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3360 *eax |= ((1 << pkg_offset) - 1) << 14;
3361 *ebx = (L3_N_LINE_SIZE - 1) | \
3362 ((L3_N_PARTITIONS - 1) << 12) | \
3363 ((L3_N_ASSOCIATIVITY - 1) << 22);
3364 *ecx = L3_N_SETS - 1;
3365 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
3366 break;
3367 default: /* end of info */
3368 *eax = 0;
3369 *ebx = 0;
3370 *ecx = 0;
3371 *edx = 0;
3372 break;
3376 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3377 if ((*eax & 31) && cs->nr_cores > 1) {
3378 *eax |= (cs->nr_cores - 1) << 26;
3380 break;
3381 case 5:
3382 /* mwait info: needed for Core compatibility */
3383 *eax = 0; /* Smallest monitor-line size in bytes */
3384 *ebx = 0; /* Largest monitor-line size in bytes */
3385 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3386 *edx = 0;
3387 break;
3388 case 6:
3389 /* Thermal and Power Leaf */
3390 *eax = env->features[FEAT_6_EAX];
3391 *ebx = 0;
3392 *ecx = 0;
3393 *edx = 0;
3394 break;
3395 case 7:
3396 /* Structured Extended Feature Flags Enumeration Leaf */
3397 if (count == 0) {
3398 *eax = 0; /* Maximum ECX value for sub-leaves */
3399 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3400 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
3401 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3402 *ecx |= CPUID_7_0_ECX_OSPKE;
3404 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3405 } else {
3406 *eax = 0;
3407 *ebx = 0;
3408 *ecx = 0;
3409 *edx = 0;
3411 break;
3412 case 9:
3413 /* Direct Cache Access Information Leaf */
3414 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3415 *ebx = 0;
3416 *ecx = 0;
3417 *edx = 0;
3418 break;
3419 case 0xA:
3420 /* Architectural Performance Monitoring Leaf */
3421 if (kvm_enabled() && cpu->enable_pmu) {
3422 KVMState *s = cs->kvm_state;
3424 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
3425 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
3426 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
3427 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
3428 } else if (hvf_enabled() && cpu->enable_pmu) {
3429 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
3430 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
3431 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
3432 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
3433 } else {
3434 *eax = 0;
3435 *ebx = 0;
3436 *ecx = 0;
3437 *edx = 0;
3439 break;
3440 case 0xB:
3441 /* Extended Topology Enumeration Leaf */
3442 if (!cpu->enable_cpuid_0xb) {
3443 *eax = *ebx = *ecx = *edx = 0;
3444 break;
3447 *ecx = count & 0xff;
3448 *edx = cpu->apic_id;
3450 switch (count) {
3451 case 0:
3452 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
3453 *ebx = cs->nr_threads;
3454 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
3455 break;
3456 case 1:
3457 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3458 *ebx = cs->nr_cores * cs->nr_threads;
3459 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
3460 break;
3461 default:
3462 *eax = 0;
3463 *ebx = 0;
3464 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
3467 assert(!(*eax & ~0x1f));
3468 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
3469 break;
3470 case 0xD: {
3471 /* Processor Extended State */
3472 *eax = 0;
3473 *ebx = 0;
3474 *ecx = 0;
3475 *edx = 0;
3476 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3477 break;
3480 if (count == 0) {
3481 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
3482 *eax = env->features[FEAT_XSAVE_COMP_LO];
3483 *edx = env->features[FEAT_XSAVE_COMP_HI];
3484 *ebx = *ecx;
3485 } else if (count == 1) {
3486 *eax = env->features[FEAT_XSAVE];
3487 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
3488 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
3489 const ExtSaveArea *esa = &x86_ext_save_areas[count];
3490 *eax = esa->size;
3491 *ebx = esa->offset;
3494 break;
3496 case 0x14: {
3497 /* Intel Processor Trace Enumeration */
3498 *eax = 0;
3499 *ebx = 0;
3500 *ecx = 0;
3501 *edx = 0;
3502 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
3503 !kvm_enabled()) {
3504 break;
3507 if (count == 0) {
3508 *eax = INTEL_PT_MAX_SUBLEAF;
3509 *ebx = INTEL_PT_MINIMAL_EBX;
3510 *ecx = INTEL_PT_MINIMAL_ECX;
3511 } else if (count == 1) {
3512 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
3513 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
3515 break;
3517 case 0x40000000:
3519 * CPUID code in kvm_arch_init_vcpu() ignores the values
3520 * set here, but we restrict this leaf to TCG nonetheless.
3522 if (tcg_enabled() && cpu->expose_tcg) {
3523 memcpy(signature, "TCGTCGTCGTCG", 12);
3524 *eax = 0x40000001;
3525 *ebx = signature[0];
3526 *ecx = signature[1];
3527 *edx = signature[2];
3528 } else {
3529 *eax = 0;
3530 *ebx = 0;
3531 *ecx = 0;
3532 *edx = 0;
3534 break;
3535 case 0x40000001:
3536 *eax = 0;
3537 *ebx = 0;
3538 *ecx = 0;
3539 *edx = 0;
3540 break;
3541 case 0x80000000:
3542 *eax = env->cpuid_xlevel;
3543 *ebx = env->cpuid_vendor1;
3544 *edx = env->cpuid_vendor2;
3545 *ecx = env->cpuid_vendor3;
3546 break;
3547 case 0x80000001:
3548 *eax = env->cpuid_version;
3549 *ebx = 0;
3550 *ecx = env->features[FEAT_8000_0001_ECX];
3551 *edx = env->features[FEAT_8000_0001_EDX];
3553 /* The Linux kernel checks for the CMPLegacy bit and
3554 * discards multiple thread information if it is set.
3555 * So don't set it here for Intel to make Linux guests happy.
3557 if (cs->nr_cores * cs->nr_threads > 1) {
3558 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
3559 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
3560 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
3561 *ecx |= 1 << 1; /* CmpLegacy bit */
3564 break;
3565 case 0x80000002:
3566 case 0x80000003:
3567 case 0x80000004:
3568 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
3569 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
3570 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
3571 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
3572 break;
3573 case 0x80000005:
3574 /* cache info (L1 cache) */
3575 if (cpu->cache_info_passthrough) {
3576 host_cpuid(index, 0, eax, ebx, ecx, edx);
3577 break;
3579 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3580 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3581 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3582 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3583 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
3584 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
3585 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
3586 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
3587 break;
3588 case 0x80000006:
3589 /* cache info (L2 cache) */
3590 if (cpu->cache_info_passthrough) {
3591 host_cpuid(index, 0, eax, ebx, ecx, edx);
3592 break;
3594 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3595 (L2_DTLB_2M_ENTRIES << 16) | \
3596 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3597 (L2_ITLB_2M_ENTRIES);
3598 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3599 (L2_DTLB_4K_ENTRIES << 16) | \
3600 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3601 (L2_ITLB_4K_ENTRIES);
3602 *ecx = (L2_SIZE_KB_AMD << 16) | \
3603 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
3604 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
3605 if (!cpu->enable_l3_cache) {
3606 *edx = ((L3_SIZE_KB / 512) << 18) | \
3607 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
3608 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
3609 } else {
3610 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
3611 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
3612 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
3614 break;
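/*
 * The packing above follows AMD's 0x80000006 layout.  Illustrative decode
 * of the L2 descriptor in ECX (sketch only):
 *
 *     unsigned l2_size_kb   = ecx >> 16;          // L2_SIZE_KB_AMD
 *     unsigned l2_assoc_enc = (ecx >> 12) & 0xf;  // AMD_ENC_ASSOC() value
 *     unsigned l2_lines_tag = (ecx >> 8) & 0xf;   // lines per tag
 *     unsigned l2_line_size = ecx & 0xff;         // line size in bytes
 */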
3615 case 0x80000007:
3616 *eax = 0;
3617 *ebx = 0;
3618 *ecx = 0;
3619 *edx = env->features[FEAT_8000_0007_EDX];
3620 break;
3621 case 0x80000008:
3622 /* virtual & phys address size in low 2 bytes. */
3623 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3624 /* 64 bit processor */
3625 *eax = cpu->phys_bits; /* configurable physical bits */
3626 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
3627 *eax |= 0x00003900; /* 57 bits virtual */
3628 } else {
3629 *eax |= 0x00003000; /* 48 bits virtual */
3631 } else {
3632 *eax = cpu->phys_bits;
3634 *ebx = env->features[FEAT_8000_0008_EBX];
3635 *ecx = 0;
3636 *edx = 0;
3637 if (cs->nr_cores * cs->nr_threads > 1) {
3638 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
3640 break;
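/*
 * Illustrative decode of the values packed above (sketch only):
 *
 *     unsigned phys_bits = eax & 0xff;         // cpu->phys_bits
 *     unsigned virt_bits = (eax >> 8) & 0xff;  // 48, or 57 with LA57
 *     unsigned nc        = ecx & 0xff;         // cores * threads - 1
 */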
3641 case 0x8000000A:
3642 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3643 *eax = 0x00000001; /* SVM Revision */
3644 *ebx = 0x00000010; /* nr of ASIDs */
3645 *ecx = 0;
3646 *edx = env->features[FEAT_SVM]; /* optional features */
3647 } else {
3648 *eax = 0;
3649 *ebx = 0;
3650 *ecx = 0;
3651 *edx = 0;
3653 break;
3654 case 0xC0000000:
3655 *eax = env->cpuid_xlevel2;
3656 *ebx = 0;
3657 *ecx = 0;
3658 *edx = 0;
3659 break;
3660 case 0xC0000001:
3661 /* Support for VIA CPU's CPUID instruction */
3662 *eax = env->cpuid_version;
3663 *ebx = 0;
3664 *ecx = 0;
3665 *edx = env->features[FEAT_C000_0001_EDX];
3666 break;
3667 case 0xC0000002:
3668 case 0xC0000003:
3669 case 0xC0000004:
3670 /* Reserved for future use; currently filled with zero */
3671 *eax = 0;
3672 *ebx = 0;
3673 *ecx = 0;
3674 *edx = 0;
3675 break;
3676 case 0x8000001F:
3677 *eax = sev_enabled() ? 0x2 : 0;
3678 *ebx = sev_get_cbit_position();
3679 *ebx |= sev_get_reduced_phys_bits() << 6;
3680 *ecx = 0;
3681 *edx = 0;
3682 break;
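/*
 * Illustrative guest-side use of the SEV leaf built above (sketch only):
 * EBX[5:0] is the C-bit position and EBX[11:6] the physical address bit
 * reduction, so the guest derives its memory-encryption mask as:
 *
 *     unsigned cbit_pos = ebx & 0x3f;
 *     unsigned phys_red = (ebx >> 6) & 0x3f;
 *     uint64_t me_mask  = 1ULL << cbit_pos;   // OR into PTEs of encrypted pages
 */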
3683 default:
3684 /* reserved values: zero */
3685 *eax = 0;
3686 *ebx = 0;
3687 *ecx = 0;
3688 *edx = 0;
3689 break;
3693 /* CPUClass::reset() */
3694 static void x86_cpu_reset(CPUState *s)
3696 X86CPU *cpu = X86_CPU(s);
3697 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
3698 CPUX86State *env = &cpu->env;
3699 target_ulong cr4;
3700 uint64_t xcr0;
3701 int i;
3703 xcc->parent_reset(s);
3705 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
3707 env->old_exception = -1;
3709 /* init to reset state */
3711 env->hflags2 |= HF2_GIF_MASK;
3713 cpu_x86_update_cr0(env, 0x60000010);
3714 env->a20_mask = ~0x0;
3715 env->smbase = 0x30000;
3716 env->msr_smi_count = 0;
3718 env->idt.limit = 0xffff;
3719 env->gdt.limit = 0xffff;
3720 env->ldt.limit = 0xffff;
3721 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
3722 env->tr.limit = 0xffff;
3723 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
3725 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
3726 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
3727 DESC_R_MASK | DESC_A_MASK);
3728 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
3729 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3730 DESC_A_MASK);
3731 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
3732 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3733 DESC_A_MASK);
3734 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
3735 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3736 DESC_A_MASK);
3737 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
3738 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3739 DESC_A_MASK);
3740 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
3741 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3742 DESC_A_MASK);
3744 env->eip = 0xfff0;
3745 env->regs[R_EDX] = env->cpuid_version;
3747 env->eflags = 0x2;
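/*
 * Worked out: with CS.base = 0xffff0000 loaded above and EIP = 0xfff0,
 * the first fetch happens at 0xffff0000 + 0x0000fff0 = 0xfffffff0, the
 * architectural reset vector just below 4 GiB.
 */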
3749 /* FPU init */
3750 for (i = 0; i < 8; i++) {
3751 env->fptags[i] = 1;
3753 cpu_set_fpuc(env, 0x37f);
3755 env->mxcsr = 0x1f80;
3756 /* All units are in INIT state. */
3757 env->xstate_bv = 0;
3759 env->pat = 0x0007040600070406ULL;
3760 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
3762 memset(env->dr, 0, sizeof(env->dr));
3763 env->dr[6] = DR6_FIXED_1;
3764 env->dr[7] = DR7_FIXED_1;
3765 cpu_breakpoint_remove_all(s, BP_CPU);
3766 cpu_watchpoint_remove_all(s, BP_CPU);
3768 cr4 = 0;
3769 xcr0 = XSTATE_FP_MASK;
3771 #ifdef CONFIG_USER_ONLY
3772 /* Enable all the features for user-mode. */
3773 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
3774 xcr0 |= XSTATE_SSE_MASK;
3776 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3777 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3778 if (env->features[esa->feature] & esa->bits) {
3779 xcr0 |= 1ull << i;
3783 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
3784 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
3786 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
3787 cr4 |= CR4_FSGSBASE_MASK;
3789 #endif
3791 env->xcr0 = xcr0;
3792 cpu_x86_update_cr4(env, cr4);
3795 * SDM 11.11.5 requires:
3796 * - IA32_MTRR_DEF_TYPE MSR.E = 0
3797 * - IA32_MTRR_PHYSMASKn.V = 0
3798 * All other bits are undefined. For simplification, zero it all.
3800 env->mtrr_deftype = 0;
3801 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
3802 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
3804 env->interrupt_injected = -1;
3805 env->exception_injected = -1;
3806 env->nmi_injected = false;
3807 #if !defined(CONFIG_USER_ONLY)
3808 /* We hard-wire the BSP to the first CPU. */
3809 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
3811 s->halted = !cpu_is_bsp(cpu);
3813 if (kvm_enabled()) {
3814 kvm_arch_reset_vcpu(cpu);
3816 else if (hvf_enabled()) {
3817 hvf_reset_vcpu(s);
3819 #endif
3822 #ifndef CONFIG_USER_ONLY
3823 bool cpu_is_bsp(X86CPU *cpu)
3825 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3828 /* TODO: remove me when reset over the QOM tree is implemented */
3829 static void x86_cpu_machine_reset_cb(void *opaque)
3831 X86CPU *cpu = opaque;
3832 cpu_reset(CPU(cpu));
3834 #endif
3836 static void mce_init(X86CPU *cpu)
3838 CPUX86State *cenv = &cpu->env;
3839 unsigned int bank;
3841 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3842 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3843 (CPUID_MCE | CPUID_MCA)) {
3844 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3845 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3846 cenv->mcg_ctl = ~(uint64_t)0;
3847 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3848 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
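/*
 * Illustrative guest-side view of the capability set above (sketch only;
 * rdmsr() stands in for whatever MSR accessor the guest uses):
 *
 *     uint64_t mcg_cap = rdmsr(MSR_IA32_MCG_CAP);
 *     unsigned nbanks  = mcg_cap & 0xff;       // MCE_BANKS_DEF
 *     bool lmce        = mcg_cap & MCG_LMCE_P; // only when enable_lmce is set
 */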
3853 #ifndef CONFIG_USER_ONLY
3854 APICCommonClass *apic_get_class(void)
3856 const char *apic_type = "apic";
3858 /* TODO: in-kernel irqchip for hvf */
3859 if (kvm_apic_in_kernel()) {
3860 apic_type = "kvm-apic";
3861 } else if (xen_enabled()) {
3862 apic_type = "xen-apic";
3865 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3868 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
3870 APICCommonState *apic;
3871 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
3873 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
3875 object_property_add_child(OBJECT(cpu), "lapic",
3876 OBJECT(cpu->apic_state), &error_abort);
3877 object_unref(OBJECT(cpu->apic_state));
3879 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
3880 /* TODO: convert to link<> */
3881 apic = APIC_COMMON(cpu->apic_state);
3882 apic->cpu = cpu;
3883 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
3886 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3888 APICCommonState *apic;
3889 static bool apic_mmio_map_once;
3891 if (cpu->apic_state == NULL) {
3892 return;
3894 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
3895 errp);
3897 /* Map APIC MMIO area */
3898 apic = APIC_COMMON(cpu->apic_state);
3899 if (!apic_mmio_map_once) {
3900 memory_region_add_subregion_overlap(get_system_memory(),
3901 apic->apicbase &
3902 MSR_IA32_APICBASE_BASE,
3903 &apic->io_memory,
3904 0x1000);
3905 apic_mmio_map_once = true;
3909 static void x86_cpu_machine_done(Notifier *n, void *unused)
3911 X86CPU *cpu = container_of(n, X86CPU, machine_done);
3912 MemoryRegion *smram =
3913 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
3915 if (smram) {
3916 cpu->smram = g_new(MemoryRegion, 1);
3917 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
3918 smram, 0, 1ull << 32);
3919 memory_region_set_enabled(cpu->smram, true);
3920 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
3923 #else
3924 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3927 #endif
3929 /* Note: Only safe for use on x86(-64) hosts */
3930 static uint32_t x86_host_phys_bits(void)
3932 uint32_t eax;
3933 uint32_t host_phys_bits;
3935 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3936 if (eax >= 0x80000008) {
3937 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3938 /* Note: According to AMD doc 25481 rev 2.34 there is a field
3939 * at 23:16 that can specify the maximum physical address bits for
3940 * the guest, overriding this value; but I've not seen
3941 * anything with that set.
3943 host_phys_bits = eax & 0xff;
3944 } else {
3945 /* It's an odd 64-bit machine that doesn't have the leaf for
3946 * physical address bits; fall back to 36, which matches most
3947 * older Intel CPUs.
3949 host_phys_bits = 36;
3952 return host_phys_bits;
3955 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3957 if (*min < value) {
3958 *min = value;
3962 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3963 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3965 CPUX86State *env = &cpu->env;
3966 FeatureWordInfo *fi = &feature_word_info[w];
3967 uint32_t eax = fi->cpuid_eax;
3968 uint32_t region = eax & 0xF0000000;
3970 if (!env->features[w]) {
3971 return;
3974 switch (region) {
3975 case 0x00000000:
3976 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3977 break;
3978 case 0x80000000:
3979 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3980 break;
3981 case 0xC0000000:
3982 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3983 break;
3987 /* Calculate XSAVE components based on the configured CPU feature flags */
3988 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3990 CPUX86State *env = &cpu->env;
3991 int i;
3992 uint64_t mask;
3994 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3995 return;
3998 mask = 0;
3999 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4000 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4001 if (env->features[esa->feature] & esa->bits) {
4002 mask |= (1ULL << i);
4006 env->features[FEAT_XSAVE_COMP_LO] = mask;
4007 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
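/*
 * Example of checking the result (sketch only): component bit 2 is the
 * AVX/YMM state, so after this function one can test
 *
 *     uint64_t comp = ((uint64_t)env->features[FEAT_XSAVE_COMP_HI] << 32) |
 *                     env->features[FEAT_XSAVE_COMP_LO];
 *     bool has_avx_state = (comp >> 2) & 1;
 */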
4010 /***** Steps involved in loading and filtering CPUID data
4012 * When initializing and realizing a CPU object, the steps
4013 * involved in setting up CPUID data are:
4015 * 1) Loading CPU model definition (X86CPUDefinition). This is
4016 * implemented by x86_cpu_load_def() and should be completely
4017 * transparent, as it is done automatically by instance_init.
4018 * No code should need to look at X86CPUDefinition structs
4019 * outside instance_init.
4021 * 2) CPU expansion. This is done by realize before CPUID
4022 * filtering, and will make sure host/accelerator data is
4023 * loaded for CPU models that depend on host capabilities
4024 * (e.g. "host"). Done by x86_cpu_expand_features().
4026 * 3) CPUID filtering. This initializes extra data related to
4027 * CPUID, and checks if the host supports all capabilities
4028 * required by the CPU. Runnability of a CPU model is
4029 * determined at this step. Done by x86_cpu_filter_features().
4031 * Some operations don't require all steps to be performed.
4032 * More precisely:
4034 * - CPU instance creation (instance_init) will run only CPU
4035 * model loading. CPU expansion can't run at instance_init-time
4036 * because host/accelerator data may not be available yet.
4037 * - CPU realization will perform both CPU model expansion and CPUID
4038 * filtering, and return an error in case one of them fails.
4039 * - query-cpu-definitions needs to run all 3 steps. It needs
4040 * to run CPUID filtering, as the 'unavailable-features'
4041 * field is set based on the filtering results.
4042 * - The query-cpu-model-expansion QMP command only needs to run
4043 * CPU model loading and CPU expansion. It should not filter
4044 * any CPUID data based on host capabilities.
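/*
 * Condensed sketch of the realize-time ordering described above (error
 * handling omitted; see x86_cpu_realizefn() for the real sequence):
 *
 *     x86_cpu_expand_features(cpu, &err);          // step 2: expansion
 *     if (x86_cpu_filter_features(cpu) &&          // step 3: filtering
 *         cpu->enforce_cpuid) {
 *         // refuse to start: host lacks requested features
 *     }
 */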
4047 /* Expand CPU configuration data, based on configured features
4048 * and host/accelerator capabilities when appropriate.
4050 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4052 CPUX86State *env = &cpu->env;
4053 FeatureWord w;
4054 GList *l;
4055 Error *local_err = NULL;
4057 /* TODO: Now cpu->max_features doesn't overwrite features
4058 * set using QOM properties, and we can convert
4059 * plus_features & minus_features to global properties
4060 * inside x86_cpu_parse_featurestr() too.
4062 if (cpu->max_features) {
4063 for (w = 0; w < FEATURE_WORDS; w++) {
4064 /* Override only features that weren't set explicitly
4065 * by the user.
4067 env->features[w] |=
4068 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4069 ~env->user_features[w];
4073 for (l = plus_features; l; l = l->next) {
4074 const char *prop = l->data;
4075 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4076 if (local_err) {
4077 goto out;
4081 for (l = minus_features; l; l = l->next) {
4082 const char *prop = l->data;
4083 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4084 if (local_err) {
4085 goto out;
4089 if (!kvm_enabled() || !cpu->expose_kvm) {
4090 env->features[FEAT_KVM] = 0;
4093 x86_cpu_enable_xsave_components(cpu);
4095 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
4096 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4097 if (cpu->full_cpuid_auto_level) {
4098 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4099 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4100 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4101 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4102 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4103 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4104 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4105 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4106 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4107 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4108 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4109 /* SVM requires CPUID[0x8000000A] */
4110 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4111 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4114 /* SEV requires CPUID[0x8000001F] */
4115 if (sev_enabled()) {
4116 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4120 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4121 if (env->cpuid_level == UINT32_MAX) {
4122 env->cpuid_level = env->cpuid_min_level;
4124 if (env->cpuid_xlevel == UINT32_MAX) {
4125 env->cpuid_xlevel = env->cpuid_min_xlevel;
4127 if (env->cpuid_xlevel2 == UINT32_MAX) {
4128 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4131 out:
4132 if (local_err != NULL) {
4133 error_propagate(errp, local_err);
4138 * Finishes initialization of CPUID data, filters CPU feature
4139 * words based on host availability of each feature.
4141 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4143 static int x86_cpu_filter_features(X86CPU *cpu)
4145 CPUX86State *env = &cpu->env;
4146 FeatureWord w;
4147 int rv = 0;
4149 for (w = 0; w < FEATURE_WORDS; w++) {
4150 uint32_t host_feat =
4151 x86_cpu_get_supported_feature_word(w, false);
4152 uint32_t requested_features = env->features[w];
4153 env->features[w] &= host_feat;
4154 cpu->filtered_features[w] = requested_features & ~env->features[w];
4155 if (cpu->filtered_features[w]) {
4156 rv = 1;
4160 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
4161 kvm_enabled()) {
4162 KVMState *s = CPU(cpu)->kvm_state;
4163 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
4164 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
4165 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
4166 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
4167 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
4169 if (!eax_0 ||
4170 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
4171 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
4172 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
4173 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
4174 INTEL_PT_ADDR_RANGES_NUM) ||
4175 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
4176 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP))) {
4178 * Processor Trace capabilities aren't configurable, so if the
4179 * host can't emulate the capabilities we report on
4180 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
4182 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
4183 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
4184 rv = 1;
4188 return rv;
4191 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4192 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4193 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4194 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4195 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4196 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
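/*
 * The three vendor words hold the 12-byte CPUID vendor string in EBX, EDX,
 * ECX order.  Illustrative reconstruction (sketch only):
 *
 *     char buf[13];
 *     memcpy(buf + 0, &env->cpuid_vendor1, 4);   // "Genu" / "Auth"
 *     memcpy(buf + 4, &env->cpuid_vendor2, 4);   // "ineI" / "enti"
 *     memcpy(buf + 8, &env->cpuid_vendor3, 4);   // "ntel" / "cAMD"
 *     buf[12] = '\0';
 */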
4197 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4199 CPUState *cs = CPU(dev);
4200 X86CPU *cpu = X86_CPU(dev);
4201 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4202 CPUX86State *env = &cpu->env;
4203 Error *local_err = NULL;
4204 static bool ht_warned;
4206 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4207 char *name = x86_cpu_class_get_model_name(xcc);
4208 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4209 g_free(name);
4210 goto out;
4213 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4214 error_setg(errp, "apic-id property was not initialized properly");
4215 return;
4218 x86_cpu_expand_features(cpu, &local_err);
4219 if (local_err) {
4220 goto out;
4223 if (x86_cpu_filter_features(cpu) &&
4224 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4225 x86_cpu_report_filtered_features(cpu);
4226 if (cpu->enforce_cpuid) {
4227 error_setg(&local_err,
4228 accel_uses_host_cpuid() ?
4229 "Host doesn't support requested features" :
4230 "TCG doesn't support requested features");
4231 goto out;
4235 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4236 * CPUID[1].EDX.
4238 if (IS_AMD_CPU(env)) {
4239 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4240 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4241 & CPUID_EXT2_AMD_ALIASES);
4244 /* For 64-bit systems, think about the number of physical bits to present.
4245 * Ideally this should be the same as the host; anything other than matching
4246 * the host can cause incorrect guest behaviour.
4247 * QEMU used to pick the magic value of 40 bits, which corresponds to
4248 * consumer AMD devices but nothing else.
4250 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4251 if (accel_uses_host_cpuid()) {
4252 uint32_t host_phys_bits = x86_host_phys_bits();
4253 static bool warned;
4255 if (cpu->host_phys_bits) {
4256 /* The user asked for us to use the host physical bits */
4257 cpu->phys_bits = host_phys_bits;
4260 /* Print a warning if the user set it to a value that's not the
4261 * host value.
4263 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4264 !warned) {
4265 warn_report("Host physical bits (%u)"
4266 " does not match phys-bits property (%u)",
4267 host_phys_bits, cpu->phys_bits);
4268 warned = true;
4271 if (cpu->phys_bits &&
4272 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4273 cpu->phys_bits < 32)) {
4274 error_setg(errp, "phys-bits should be between 32 and %u "
4275 " (but is %u)",
4276 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4277 return;
4279 } else {
4280 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4281 error_setg(errp, "TCG only supports phys-bits=%u",
4282 TCG_PHYS_ADDR_BITS);
4283 return;
4286 /* 0 means it was not explicitly set by the user (or by machine
4287 * compat_props or by the host code above). In this case, the default
4288 * is the value used by TCG (40).
4290 if (cpu->phys_bits == 0) {
4291 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4293 } else {
4294 /* For 32-bit systems, don't use the user-set value, but keep
4295 * phys_bits consistent with what we tell the guest.
4297 if (cpu->phys_bits != 0) {
4298 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4299 return;
4302 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4303 cpu->phys_bits = 36;
4304 } else {
4305 cpu->phys_bits = 32;
4308 cpu_exec_realizefn(cs, &local_err);
4309 if (local_err != NULL) {
4310 error_propagate(errp, local_err);
4311 return;
4314 #ifndef CONFIG_USER_ONLY
4315 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4317 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4318 x86_cpu_apic_create(cpu, &local_err);
4319 if (local_err != NULL) {
4320 goto out;
4323 #endif
4325 mce_init(cpu);
4327 #ifndef CONFIG_USER_ONLY
4328 if (tcg_enabled()) {
4329 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4330 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4332 /* Outer container... */
4333 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4334 memory_region_set_enabled(cpu->cpu_as_root, true);
4336 /* ... with two regions inside: normal system memory with low
4337 * priority, and...
4339 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4340 get_system_memory(), 0, ~0ull);
4341 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4342 memory_region_set_enabled(cpu->cpu_as_mem, true);
4344 cs->num_ases = 2;
4345 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4346 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4348 /* ... SMRAM with higher priority, linked from /machine/smram. */
4349 cpu->machine_done.notify = x86_cpu_machine_done;
4350 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4352 #endif
4354 qemu_init_vcpu(cs);
4356 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
4357 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
4358 * based on inputs (sockets,cores,threads), it is still better to give
4359 * users a warning.
4361 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
4362 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
4364 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
4365 error_report("AMD CPU doesn't support hyperthreading. Please configure"
4366 " -smp options properly.");
4367 ht_warned = true;
4370 x86_cpu_apic_realize(cpu, &local_err);
4371 if (local_err != NULL) {
4372 goto out;
4374 cpu_reset(cs);
4376 xcc->parent_realize(dev, &local_err);
4378 out:
4379 if (local_err != NULL) {
4380 error_propagate(errp, local_err);
4381 return;
4385 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4387 X86CPU *cpu = X86_CPU(dev);
4388 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4389 Error *local_err = NULL;
4391 #ifndef CONFIG_USER_ONLY
4392 cpu_remove_sync(CPU(dev));
4393 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
4394 #endif
4396 if (cpu->apic_state) {
4397 object_unparent(OBJECT(cpu->apic_state));
4398 cpu->apic_state = NULL;
4401 xcc->parent_unrealize(dev, &local_err);
4402 if (local_err != NULL) {
4403 error_propagate(errp, local_err);
4404 return;
4408 typedef struct BitProperty {
4409 FeatureWord w;
4410 uint32_t mask;
4411 } BitProperty;
4413 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
4414 void *opaque, Error **errp)
4416 X86CPU *cpu = X86_CPU(obj);
4417 BitProperty *fp = opaque;
4418 uint32_t f = cpu->env.features[fp->w];
4419 bool value = (f & fp->mask) == fp->mask;
4420 visit_type_bool(v, name, &value, errp);
4423 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
4424 void *opaque, Error **errp)
4426 DeviceState *dev = DEVICE(obj);
4427 X86CPU *cpu = X86_CPU(obj);
4428 BitProperty *fp = opaque;
4429 Error *local_err = NULL;
4430 bool value;
4432 if (dev->realized) {
4433 qdev_prop_set_after_realize(dev, name, errp);
4434 return;
4437 visit_type_bool(v, name, &value, &local_err);
4438 if (local_err) {
4439 error_propagate(errp, local_err);
4440 return;
4443 if (value) {
4444 cpu->env.features[fp->w] |= fp->mask;
4445 } else {
4446 cpu->env.features[fp->w] &= ~fp->mask;
4448 cpu->env.user_features[fp->w] |= fp->mask;
4451 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
4452 void *opaque)
4454 BitProperty *prop = opaque;
4455 g_free(prop);
4458 /* Register a boolean property to get/set a single bit in a uint32_t field.
4460 * The same property name can be registered multiple times to make it affect
4461 * multiple bits in the same FeatureWord. In that case, the getter will return
4462 * true only if all bits are set.
4464 static void x86_cpu_register_bit_prop(X86CPU *cpu,
4465 const char *prop_name,
4466 FeatureWord w,
4467 int bitnr)
4469 BitProperty *fp;
4470 ObjectProperty *op;
4471 uint32_t mask = (1UL << bitnr);
4473 op = object_property_find(OBJECT(cpu), prop_name, NULL);
4474 if (op) {
4475 fp = op->opaque;
4476 assert(fp->w == w);
4477 fp->mask |= mask;
4478 } else {
4479 fp = g_new0(BitProperty, 1);
4480 fp->w = w;
4481 fp->mask = mask;
4482 object_property_add(OBJECT(cpu), prop_name, "bool",
4483 x86_cpu_get_bit_prop,
4484 x86_cpu_set_bit_prop,
4485 x86_cpu_release_bit_prop, fp, &error_abort);
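/*
 * Illustrative use of the multi-bit behaviour described above (the feature
 * name and bit numbers are made up for the example): registering the same
 * name twice ORs the masks, so the getter reads back true only when both
 * bits are set.
 *
 *     x86_cpu_register_bit_prop(cpu, "example-feat", FEAT_1_EDX, 3);
 *     x86_cpu_register_bit_prop(cpu, "example-feat", FEAT_1_EDX, 7);
 *     // getter: (env->features[FEAT_1_EDX] & 0x88) == 0x88
 */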
4489 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
4490 FeatureWord w,
4491 int bitnr)
4493 FeatureWordInfo *fi = &feature_word_info[w];
4494 const char *name = fi->feat_names[bitnr];
4496 if (!name) {
4497 return;
4500 /* Property names should use "-" instead of "_".
4501 * Old names containing underscores are registered as aliases
4502 * using object_property_add_alias()
4504 assert(!strchr(name, '_'));
4505 /* aliases don't use "|" delimiters anymore, they are registered
4506 * manually using object_property_add_alias() */
4507 assert(!strchr(name, '|'));
4508 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
4511 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
4513 X86CPU *cpu = X86_CPU(cs);
4514 CPUX86State *env = &cpu->env;
4515 GuestPanicInformation *panic_info = NULL;
4517 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
4518 panic_info = g_malloc0(sizeof(GuestPanicInformation));
4520 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
4522 assert(HV_CRASH_PARAMS >= 5);
4523 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
4524 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
4525 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
4526 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
4527 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
4530 return panic_info;
4532 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
4533 const char *name, void *opaque,
4534 Error **errp)
4536 CPUState *cs = CPU(obj);
4537 GuestPanicInformation *panic_info;
4539 if (!cs->crash_occurred) {
4540 error_setg(errp, "No crash occurred");
4541 return;
4544 panic_info = x86_cpu_get_crash_info(cs);
4545 if (panic_info == NULL) {
4546 error_setg(errp, "No crash information");
4547 return;
4550 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
4551 errp);
4552 qapi_free_GuestPanicInformation(panic_info);
4555 static void x86_cpu_initfn(Object *obj)
4557 CPUState *cs = CPU(obj);
4558 X86CPU *cpu = X86_CPU(obj);
4559 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
4560 CPUX86State *env = &cpu->env;
4561 FeatureWord w;
4563 cs->env_ptr = env;
4565 object_property_add(obj, "family", "int",
4566 x86_cpuid_version_get_family,
4567 x86_cpuid_version_set_family, NULL, NULL, NULL);
4568 object_property_add(obj, "model", "int",
4569 x86_cpuid_version_get_model,
4570 x86_cpuid_version_set_model, NULL, NULL, NULL);
4571 object_property_add(obj, "stepping", "int",
4572 x86_cpuid_version_get_stepping,
4573 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
4574 object_property_add_str(obj, "vendor",
4575 x86_cpuid_get_vendor,
4576 x86_cpuid_set_vendor, NULL);
4577 object_property_add_str(obj, "model-id",
4578 x86_cpuid_get_model_id,
4579 x86_cpuid_set_model_id, NULL);
4580 object_property_add(obj, "tsc-frequency", "int",
4581 x86_cpuid_get_tsc_freq,
4582 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
4583 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
4584 x86_cpu_get_feature_words,
4585 NULL, NULL, (void *)env->features, NULL);
4586 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
4587 x86_cpu_get_feature_words,
4588 NULL, NULL, (void *)cpu->filtered_features, NULL);
4590 object_property_add(obj, "crash-information", "GuestPanicInformation",
4591 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
4593 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
4595 for (w = 0; w < FEATURE_WORDS; w++) {
4596 int bitnr;
4598 for (bitnr = 0; bitnr < 32; bitnr++) {
4599 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
4603 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
4604 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
4605 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
4606 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
4607 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
4608 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
4609 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
4611 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
4612 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
4613 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
4614 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
4615 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
4616 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
4617 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
4618 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
4619 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
4620 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
4621 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
4622 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
4623 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
4624 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
4625 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
4626 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
4627 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
4628 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
4629 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
4630 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
4631 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
4633 if (xcc->cpu_def) {
4634 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
4638 static int64_t x86_cpu_get_arch_id(CPUState *cs)
4640 X86CPU *cpu = X86_CPU(cs);
4642 return cpu->apic_id;
4645 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
4647 X86CPU *cpu = X86_CPU(cs);
4649 return cpu->env.cr[0] & CR0_PG_MASK;
4652 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
4654 X86CPU *cpu = X86_CPU(cs);
4656 cpu->env.eip = value;
4659 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
4661 X86CPU *cpu = X86_CPU(cs);
4663 cpu->env.eip = tb->pc - tb->cs_base;
4666 static bool x86_cpu_has_work(CPUState *cs)
4668 X86CPU *cpu = X86_CPU(cs);
4669 CPUX86State *env = &cpu->env;
4671 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
4672 CPU_INTERRUPT_POLL)) &&
4673 (env->eflags & IF_MASK)) ||
4674 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
4675 CPU_INTERRUPT_INIT |
4676 CPU_INTERRUPT_SIPI |
4677 CPU_INTERRUPT_MCE)) ||
4678 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
4679 !(env->hflags & HF_SMM_MASK));
4682 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
4684 X86CPU *cpu = X86_CPU(cs);
4685 CPUX86State *env = &cpu->env;
4687 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
4688 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
4689 : bfd_mach_i386_i8086);
4690 info->print_insn = print_insn_i386;
4692 info->cap_arch = CS_ARCH_X86;
4693 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
4694 : env->hflags & HF_CS32_MASK ? CS_MODE_32
4695 : CS_MODE_16);
4696 info->cap_insn_unit = 1;
4697 info->cap_insn_split = 8;
4700 void x86_update_hflags(CPUX86State *env)
4702 uint32_t hflags;
4703 #define HFLAG_COPY_MASK \
4704 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
4705 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
4706 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
4707 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
4709 hflags = env->hflags & HFLAG_COPY_MASK;
4710 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
4711 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
4712 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
4713 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
4714 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
4716 if (env->cr[4] & CR4_OSFXSR_MASK) {
4717 hflags |= HF_OSFXSR_MASK;
4720 if (env->efer & MSR_EFER_LMA) {
4721 hflags |= HF_LMA_MASK;
4724 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
4725 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
4726 } else {
4727 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
4728 (DESC_B_SHIFT - HF_CS32_SHIFT);
4729 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
4730 (DESC_B_SHIFT - HF_SS32_SHIFT);
4731 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
4732 !(hflags & HF_CS32_MASK)) {
4733 hflags |= HF_ADDSEG_MASK;
4734 } else {
4735 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
4736 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
4739 env->hflags = hflags;
4742 static Property x86_cpu_properties[] = {
4743 #ifdef CONFIG_USER_ONLY
4744 /* apic_id = 0 by default for *-user, see commit 9886e834 */
4745 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
4746 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
4747 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
4748 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
4749 #else
4750 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
4751 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
4752 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
4753 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
4754 #endif
4755 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
4756 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
4757 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
4758 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
4759 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
4760 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
4761 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
4762 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
4763 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
4764 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
4765 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
4766 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
4767 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
4768 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
4769 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
4770 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
4771 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
4772 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
4773 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
4774 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
4775 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
4776 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
4777 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
4778 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
4779 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
4780 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
4781 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
4782 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
4783 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
4784 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
4785 false),
4786 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
4787 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
4790 * From "Requirements for Implementing the Microsoft
4791 * Hypervisor Interface":
4792 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
4794 * "Starting with Windows Server 2012 and Windows 8, if
4795 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
4796 * the hypervisor imposes no specific limit to the number of VPs.
4797 * In this case, Windows Server 2012 guest VMs may use more than
4798 * 64 VPs, up to the maximum supported number of processors applicable
4799 * to the specific Windows version being used."
4801 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
4802 DEFINE_PROP_END_OF_LIST()
4805 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
4807 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4808 CPUClass *cc = CPU_CLASS(oc);
4809 DeviceClass *dc = DEVICE_CLASS(oc);
4811 device_class_set_parent_realize(dc, x86_cpu_realizefn,
4812 &xcc->parent_realize);
4813 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
4814 &xcc->parent_unrealize);
4815 dc->props = x86_cpu_properties;
4817 xcc->parent_reset = cc->reset;
4818 cc->reset = x86_cpu_reset;
4819 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
4821 cc->class_by_name = x86_cpu_class_by_name;
4822 cc->parse_features = x86_cpu_parse_featurestr;
4823 cc->has_work = x86_cpu_has_work;
4824 #ifdef CONFIG_TCG
4825 cc->do_interrupt = x86_cpu_do_interrupt;
4826 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
4827 #endif
4828 cc->dump_state = x86_cpu_dump_state;
4829 cc->get_crash_info = x86_cpu_get_crash_info;
4830 cc->set_pc = x86_cpu_set_pc;
4831 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
4832 cc->gdb_read_register = x86_cpu_gdb_read_register;
4833 cc->gdb_write_register = x86_cpu_gdb_write_register;
4834 cc->get_arch_id = x86_cpu_get_arch_id;
4835 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
4836 #ifdef CONFIG_USER_ONLY
4837 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
4838 #else
4839 cc->asidx_from_attrs = x86_asidx_from_attrs;
4840 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
4841 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
4842 cc->write_elf64_note = x86_cpu_write_elf64_note;
4843 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
4844 cc->write_elf32_note = x86_cpu_write_elf32_note;
4845 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
4846 cc->vmsd = &vmstate_x86_cpu;
4847 #endif
4848 cc->gdb_arch_name = x86_gdb_arch_name;
4849 #ifdef TARGET_X86_64
4850 cc->gdb_core_xml_file = "i386-64bit.xml";
4851 cc->gdb_num_core_regs = 57;
4852 #else
4853 cc->gdb_core_xml_file = "i386-32bit.xml";
4854 cc->gdb_num_core_regs = 41;
4855 #endif
4856 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
4857 cc->debug_excp_handler = breakpoint_handler;
4858 #endif
4859 cc->cpu_exec_enter = x86_cpu_exec_enter;
4860 cc->cpu_exec_exit = x86_cpu_exec_exit;
4861 #ifdef CONFIG_TCG
4862 cc->tcg_initialize = tcg_x86_init;
4863 #endif
4864 cc->disas_set_info = x86_disas_set_info;
4866 dc->user_creatable = true;
4869 static const TypeInfo x86_cpu_type_info = {
4870 .name = TYPE_X86_CPU,
4871 .parent = TYPE_CPU,
4872 .instance_size = sizeof(X86CPU),
4873 .instance_init = x86_cpu_initfn,
4874 .abstract = true,
4875 .class_size = sizeof(X86CPUClass),
4876 .class_init = x86_cpu_common_class_init,
4880 /* "base" CPU model, used by query-cpu-model-expansion */
4881 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4883 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4885 xcc->static_model = true;
4886 xcc->migration_safe = true;
4887 xcc->model_description = "base CPU model type with no features enabled";
4888 xcc->ordering = 8;
4891 static const TypeInfo x86_base_cpu_type_info = {
4892 .name = X86_CPU_TYPE_NAME("base"),
4893 .parent = TYPE_X86_CPU,
4894 .class_init = x86_cpu_base_class_init,
4897 static void x86_cpu_register_types(void)
4899 int i;
4901 type_register_static(&x86_cpu_type_info);
4902 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
4903 x86_register_cpudef_type(&builtin_x86_defs[i]);
4905 type_register_static(&max_x86_cpu_type_info);
4906 type_register_static(&x86_base_cpu_type_info);
4907 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
4908 type_register_static(&host_x86_cpu_type_info);
4909 #endif
4912 type_init(x86_cpu_register_types)