i386: Don't automatically enable FEAT_KVM_HINTS bits
[qemu.git] / target / i386 / cpu.c
blob a20fe265735e99000550fc7d0a9be86eb00c1278
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
23 #include "cpu.h"
24 #include "exec/exec-all.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/hvf.h"
27 #include "sysemu/cpus.h"
28 #include "kvm_i386.h"
29 #include "sev_i386.h"
31 #include "qemu/error-report.h"
32 #include "qemu/option.h"
33 #include "qemu/config-file.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-visit-misc.h"
36 #include "qapi/qapi-visit-run-state.h"
37 #include "qapi/qmp/qdict.h"
38 #include "qapi/qmp/qerror.h"
39 #include "qapi/visitor.h"
40 #include "qom/qom-qobject.h"
41 #include "sysemu/arch_init.h"
43 #if defined(CONFIG_KVM)
44 #include <linux/kvm_para.h>
45 #endif
47 #include "sysemu/sysemu.h"
48 #include "hw/qdev-properties.h"
49 #include "hw/i386/topology.h"
50 #ifndef CONFIG_USER_ONLY
51 #include "exec/address-spaces.h"
52 #include "hw/hw.h"
53 #include "hw/xen/xen.h"
54 #include "hw/i386/apic_internal.h"
55 #endif
57 #include "disas/capstone.h"
60 /* Cache topology CPUID constants: */
62 /* CPUID Leaf 2 Descriptors */
64 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
65 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
66 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
67 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
70 /* CPUID Leaf 4 constants: */
72 /* EAX: */
73 #define CPUID_4_TYPE_DCACHE 1
74 #define CPUID_4_TYPE_ICACHE 2
75 #define CPUID_4_TYPE_UNIFIED 3
77 #define CPUID_4_LEVEL(l) ((l) << 5)
79 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
80 #define CPUID_4_FULLY_ASSOC (1 << 9)
82 /* EDX: */
83 #define CPUID_4_NO_INVD_SHARING (1 << 0)
84 #define CPUID_4_INCLUSIVE (1 << 1)
85 #define CPUID_4_COMPLEX_IDX (1 << 2)
87 #define ASSOC_FULL 0xFF
89 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
90 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
91 a == 2 ? 0x2 : \
92 a == 4 ? 0x4 : \
93 a == 8 ? 0x6 : \
94 a == 16 ? 0x8 : \
95 a == 32 ? 0xA : \
96 a == 48 ? 0xB : \
97 a == 64 ? 0xC : \
98 a == 96 ? 0xD : \
99 a == 128 ? 0xE : \
100 a == ASSOC_FULL ? 0xF : \
101 0 /* invalid value */)
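/*
 * Illustrative compile-time checks (a sketch, not part of the upstream file):
 * the encoding above collapses a way count into the 4-bit associativity field
 * of CPUID leaf 0x80000006, with unsupported counts reading back as 0.
 */
#if 0 /* example only */
QEMU_BUILD_BUG_ON(AMD_ENC_ASSOC(8) != 0x6);
QEMU_BUILD_BUG_ON(AMD_ENC_ASSOC(ASSOC_FULL) != 0xF);
QEMU_BUILD_BUG_ON(AMD_ENC_ASSOC(3) != 0); /* not an encodable way count */
#endif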
104 /* Definitions of the hardcoded cache entries we expose: */
106 /* L1 data cache: */
107 #define L1D_LINE_SIZE 64
108 #define L1D_ASSOCIATIVITY 8
109 #define L1D_SETS 64
110 #define L1D_PARTITIONS 1
111 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
112 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
113 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
114 #define L1D_LINES_PER_TAG 1
115 #define L1D_SIZE_KB_AMD 64
116 #define L1D_ASSOCIATIVITY_AMD 2
118 /* L1 instruction cache: */
119 #define L1I_LINE_SIZE 64
120 #define L1I_ASSOCIATIVITY 8
121 #define L1I_SETS 64
122 #define L1I_PARTITIONS 1
123 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
124 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
125 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
126 #define L1I_LINES_PER_TAG 1
127 #define L1I_SIZE_KB_AMD 64
128 #define L1I_ASSOCIATIVITY_AMD 2
130 /* Level 2 unified cache: */
131 #define L2_LINE_SIZE 64
132 #define L2_ASSOCIATIVITY 16
133 #define L2_SETS 4096
134 #define L2_PARTITIONS 1
135 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
136 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
137 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
138 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
139 #define L2_LINES_PER_TAG 1
140 #define L2_SIZE_KB_AMD 512
142 /* Level 3 unified cache: */
143 #define L3_SIZE_KB 0 /* disabled */
144 #define L3_ASSOCIATIVITY 0 /* disabled */
145 #define L3_LINES_PER_TAG 0 /* disabled */
146 #define L3_LINE_SIZE 0 /* disabled */
147 #define L3_N_LINE_SIZE 64
148 #define L3_N_ASSOCIATIVITY 16
149 #define L3_N_SETS 16384
150 #define L3_N_PARTITIONS 1
151 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
152 #define L3_N_LINES_PER_TAG 1
153 #define L3_N_SIZE_KB_AMD 16384
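/*
 * Illustrative check (a sketch, not part of the upstream file): the "Size ="
 * comments above follow from line size * associativity * sets * partitions,
 * giving 32 KiB for L1D/L1I, 4 MiB for L2 and 16 MiB for the L3_N geometry.
 */
#if 0 /* example only */
QEMU_BUILD_BUG_ON(L1D_LINE_SIZE * L1D_ASSOCIATIVITY * L1D_SETS * L1D_PARTITIONS
                  != 32 * 1024);
QEMU_BUILD_BUG_ON(L2_LINE_SIZE * L2_ASSOCIATIVITY * L2_SETS * L2_PARTITIONS
                  != 4 * 1024 * 1024);
QEMU_BUILD_BUG_ON(L3_N_LINE_SIZE * L3_N_ASSOCIATIVITY * L3_N_SETS
                  * L3_N_PARTITIONS != 16 * 1024 * 1024);
#endif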
155 /* TLB definitions: */
157 #define L1_DTLB_2M_ASSOC 1
158 #define L1_DTLB_2M_ENTRIES 255
159 #define L1_DTLB_4K_ASSOC 1
160 #define L1_DTLB_4K_ENTRIES 255
162 #define L1_ITLB_2M_ASSOC 1
163 #define L1_ITLB_2M_ENTRIES 255
164 #define L1_ITLB_4K_ASSOC 1
165 #define L1_ITLB_4K_ENTRIES 255
167 #define L2_DTLB_2M_ASSOC 0 /* disabled */
168 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
169 #define L2_DTLB_4K_ASSOC 4
170 #define L2_DTLB_4K_ENTRIES 512
172 #define L2_ITLB_2M_ASSOC 0 /* disabled */
173 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
174 #define L2_ITLB_4K_ASSOC 4
175 #define L2_ITLB_4K_ENTRIES 512
177 /* CPUID Leaf 0x14 constants: */
178 #define INTEL_PT_MAX_SUBLEAF 0x1
179 /*
180  * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
181  *          MSR can be accessed;
182  * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
183  * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
184  *          of Intel PT MSRs across warm reset;
185  * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
186  */
187 #define INTEL_PT_MINIMAL_EBX 0xf
188 /*
189  * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
190  *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
191  *          accessed;
192  * bit[01]: ToPA tables can hold any number of output entries, up to the
193  *          maximum allowed by the MaskOrTableOffset field of
194  *          IA32_RTIT_OUTPUT_MASK_PTRS;
195  * bit[02]: Support Single-Range Output scheme;
196  */
197 #define INTEL_PT_MINIMAL_ECX 0x7
198 /* generated packets which contain IP payloads have LIP values */
199 #define INTEL_PT_IP_LIP (1 << 31)
200 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
201 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
202 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
203 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
204 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
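/*
 * Illustrative check (a sketch, not part of the upstream file): the number of
 * advertised address ranges has to fit in the mask used to report it.
 */
#if 0 /* example only */
QEMU_BUILD_BUG_ON(INTEL_PT_ADDR_RANGES_NUM > INTEL_PT_ADDR_RANGES_NUM_MASK);
#endif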
206 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
207 uint32_t vendor2, uint32_t vendor3)
209 int i;
210 for (i = 0; i < 4; i++) {
211 dst[i] = vendor1 >> (8 * i);
212 dst[i + 4] = vendor2 >> (8 * i);
213 dst[i + 8] = vendor3 >> (8 * i);
215 dst[CPUID_VENDOR_SZ] = '\0';
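/*
 * Usage sketch (illustrative, not part of the upstream file): CPUID.0 returns
 * the vendor string little-endian in EBX/EDX/ECX, so the three words below
 * reassemble to "GenuineIntel".
 */
#if 0 /* example only */
static void example_vendor_string(void)
{
    char vendor[CPUID_VENDOR_SZ + 1];

    x86_cpu_vendor_words2str(vendor, 0x756e6547, 0x49656e69, 0x6c65746e);
    /* vendor now holds "GenuineIntel" */
}
#endif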
218 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
219 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
220 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
221 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
222 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
223 CPUID_PSE36 | CPUID_FXSR)
224 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
225 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
226 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
227 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
228 CPUID_PAE | CPUID_SEP | CPUID_APIC)
230 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
231 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
232 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
233 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
234 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
235 /* partly implemented:
236 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
237 /* missing:
238 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
239 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
240 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
241 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
242 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
243 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
244 /* missing:
245 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
246 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
247 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
248 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
249 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
251 #ifdef TARGET_X86_64
252 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
253 #else
254 #define TCG_EXT2_X86_64_FEATURES 0
255 #endif
257 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
258 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
259 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
260 TCG_EXT2_X86_64_FEATURES)
261 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
262 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
263 #define TCG_EXT4_FEATURES 0
264 #define TCG_SVM_FEATURES 0
265 #define TCG_KVM_FEATURES 0
266 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
267 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
268 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
269 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
270 CPUID_7_0_EBX_ERMS)
271 /* missing:
272 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
273 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
274 CPUID_7_0_EBX_RDSEED */
275 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
276 CPUID_7_0_ECX_LA57)
277 #define TCG_7_0_EDX_FEATURES 0
278 #define TCG_APM_FEATURES 0
279 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
280 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
281 /* missing:
282 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
284 typedef struct FeatureWordInfo {
285 /* feature flags names are taken from "Intel Processor Identification and
286 * the CPUID Instruction" and AMD's "CPUID Specification".
287 * In cases of disagreement between feature naming conventions,
288  * aliases may be added.
289  */
290 const char *feat_names[32];
291 uint32_t cpuid_eax; /* Input EAX for CPUID */
292 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
293 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
294 int cpuid_reg; /* output register (R_* constant) */
295 uint32_t tcg_features; /* Feature flags supported by TCG */
296 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
297 uint32_t migratable_flags; /* Feature flags known to be migratable */
298 /* Features that shouldn't be auto-enabled by "-cpu host" */
299 uint32_t no_autoenable_flags;
300 } FeatureWordInfo;
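/*
 * Minimal sketch (illustrative, not the actual expansion code): when
 * "-cpu host"/"-cpu max" expands the bits the accelerator supports, anything
 * listed in no_autoenable_flags is masked out unless the user asked for it
 * explicitly, which is how FEAT_KVM_HINTS below stays off by default.
 */
#if 0 /* example only */
static uint32_t example_autoenabled_bits(const FeatureWordInfo *wi,
                                         uint32_t host_bits,
                                         uint32_t user_requested_bits)
{
    return (host_bits & ~wi->no_autoenable_flags) | user_requested_bits;
}
#endif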
302 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
303 [FEAT_1_EDX] = {
304 .feat_names = {
305 "fpu", "vme", "de", "pse",
306 "tsc", "msr", "pae", "mce",
307 "cx8", "apic", NULL, "sep",
308 "mtrr", "pge", "mca", "cmov",
309 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
310 NULL, "ds" /* Intel dts */, "acpi", "mmx",
311 "fxsr", "sse", "sse2", "ss",
312 "ht" /* Intel htt */, "tm", "ia64", "pbe",
314 .cpuid_eax = 1, .cpuid_reg = R_EDX,
315 .tcg_features = TCG_FEATURES,
317 [FEAT_1_ECX] = {
318 .feat_names = {
319 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
320 "ds-cpl", "vmx", "smx", "est",
321 "tm2", "ssse3", "cid", NULL,
322 "fma", "cx16", "xtpr", "pdcm",
323 NULL, "pcid", "dca", "sse4.1",
324 "sse4.2", "x2apic", "movbe", "popcnt",
325 "tsc-deadline", "aes", "xsave", "osxsave",
326 "avx", "f16c", "rdrand", "hypervisor",
328 .cpuid_eax = 1, .cpuid_reg = R_ECX,
329 .tcg_features = TCG_EXT_FEATURES,
331 /* Feature names that are already defined on feature_name[] but
332 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
333 * names on feat_names below. They are copied automatically
334  * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
335  */
336 [FEAT_8000_0001_EDX] = {
337 .feat_names = {
338 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
339 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
340 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
341 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
342 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
343 "nx", NULL, "mmxext", NULL /* mmx */,
344 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
345 NULL, "lm", "3dnowext", "3dnow",
347 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
348 .tcg_features = TCG_EXT2_FEATURES,
350 [FEAT_8000_0001_ECX] = {
351 .feat_names = {
352 "lahf-lm", "cmp-legacy", "svm", "extapic",
353 "cr8legacy", "abm", "sse4a", "misalignsse",
354 "3dnowprefetch", "osvw", "ibs", "xop",
355 "skinit", "wdt", NULL, "lwp",
356 "fma4", "tce", NULL, "nodeid-msr",
357 NULL, "tbm", "topoext", "perfctr-core",
358 "perfctr-nb", NULL, NULL, NULL,
359 NULL, NULL, NULL, NULL,
361 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
362 .tcg_features = TCG_EXT3_FEATURES,
364 [FEAT_C000_0001_EDX] = {
365 .feat_names = {
366 NULL, NULL, "xstore", "xstore-en",
367 NULL, NULL, "xcrypt", "xcrypt-en",
368 "ace2", "ace2-en", "phe", "phe-en",
369 "pmm", "pmm-en", NULL, NULL,
370 NULL, NULL, NULL, NULL,
371 NULL, NULL, NULL, NULL,
372 NULL, NULL, NULL, NULL,
373 NULL, NULL, NULL, NULL,
375 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
376 .tcg_features = TCG_EXT4_FEATURES,
378 [FEAT_KVM] = {
379 .feat_names = {
380 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
381 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
382 NULL, "kvm-pv-tlb-flush", NULL, NULL,
383 NULL, NULL, NULL, NULL,
384 NULL, NULL, NULL, NULL,
385 NULL, NULL, NULL, NULL,
386 "kvmclock-stable-bit", NULL, NULL, NULL,
387 NULL, NULL, NULL, NULL,
389 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
390 .tcg_features = TCG_KVM_FEATURES,
392 [FEAT_KVM_HINTS] = {
393 .feat_names = {
394 "kvm-hint-dedicated", NULL, NULL, NULL,
395 NULL, NULL, NULL, NULL,
396 NULL, NULL, NULL, NULL,
397 NULL, NULL, NULL, NULL,
398 NULL, NULL, NULL, NULL,
399 NULL, NULL, NULL, NULL,
400 NULL, NULL, NULL, NULL,
401 NULL, NULL, NULL, NULL,
403 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
404 .tcg_features = TCG_KVM_FEATURES,
405         /*
406          * KVM hints aren't auto-enabled by -cpu host, they need to be
407          * explicitly enabled in the command-line (see the note below).
408          */
409         .no_autoenable_flags = ~0U,
411 [FEAT_HYPERV_EAX] = {
412 .feat_names = {
413 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
414 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
415 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
416 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
417 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
418 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
419 NULL, NULL, NULL, NULL,
420 NULL, NULL, NULL, NULL,
421 NULL, NULL, NULL, NULL,
422 NULL, NULL, NULL, NULL,
423 NULL, NULL, NULL, NULL,
425 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
427 [FEAT_HYPERV_EBX] = {
428 .feat_names = {
429 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
430 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
431 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
432 NULL /* hv_create_port */, NULL /* hv_connect_port */,
433 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
434 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
435 NULL, NULL,
436 NULL, NULL, NULL, NULL,
437 NULL, NULL, NULL, NULL,
438 NULL, NULL, NULL, NULL,
439 NULL, NULL, NULL, NULL,
441 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
443 [FEAT_HYPERV_EDX] = {
444 .feat_names = {
445 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
446 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
447 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
448 NULL, NULL,
449 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
450 NULL, NULL, NULL, NULL,
451 NULL, NULL, NULL, NULL,
452 NULL, NULL, NULL, NULL,
453 NULL, NULL, NULL, NULL,
454 NULL, NULL, NULL, NULL,
456 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
458 [FEAT_SVM] = {
459 .feat_names = {
460 "npt", "lbrv", "svm-lock", "nrip-save",
461 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
462 NULL, NULL, "pause-filter", NULL,
463 "pfthreshold", NULL, NULL, NULL,
464 NULL, NULL, NULL, NULL,
465 NULL, NULL, NULL, NULL,
466 NULL, NULL, NULL, NULL,
467 NULL, NULL, NULL, NULL,
469 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
470 .tcg_features = TCG_SVM_FEATURES,
472 [FEAT_7_0_EBX] = {
473 .feat_names = {
474 "fsgsbase", "tsc-adjust", NULL, "bmi1",
475 "hle", "avx2", NULL, "smep",
476 "bmi2", "erms", "invpcid", "rtm",
477 NULL, NULL, "mpx", NULL,
478 "avx512f", "avx512dq", "rdseed", "adx",
479 "smap", "avx512ifma", "pcommit", "clflushopt",
480 "clwb", "intel-pt", "avx512pf", "avx512er",
481 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
483 .cpuid_eax = 7,
484 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
485 .cpuid_reg = R_EBX,
486 .tcg_features = TCG_7_0_EBX_FEATURES,
488 [FEAT_7_0_ECX] = {
489 .feat_names = {
490 NULL, "avx512vbmi", "umip", "pku",
491 "ospke", NULL, "avx512vbmi2", NULL,
492 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
493 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
494 "la57", NULL, NULL, NULL,
495 NULL, NULL, "rdpid", NULL,
496 NULL, NULL, NULL, NULL,
497 NULL, NULL, NULL, NULL,
499 .cpuid_eax = 7,
500 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
501 .cpuid_reg = R_ECX,
502 .tcg_features = TCG_7_0_ECX_FEATURES,
504 [FEAT_7_0_EDX] = {
505 .feat_names = {
506 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
507 NULL, NULL, NULL, NULL,
508 NULL, NULL, NULL, NULL,
509 NULL, NULL, NULL, NULL,
510 NULL, NULL, NULL, NULL,
511 NULL, NULL, NULL, NULL,
512 NULL, NULL, "spec-ctrl", NULL,
513 NULL, NULL, NULL, NULL,
515 .cpuid_eax = 7,
516 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
517 .cpuid_reg = R_EDX,
518 .tcg_features = TCG_7_0_EDX_FEATURES,
520 [FEAT_8000_0007_EDX] = {
521 .feat_names = {
522 NULL, NULL, NULL, NULL,
523 NULL, NULL, NULL, NULL,
524 "invtsc", NULL, NULL, NULL,
525 NULL, NULL, NULL, NULL,
526 NULL, NULL, NULL, NULL,
527 NULL, NULL, NULL, NULL,
528 NULL, NULL, NULL, NULL,
529 NULL, NULL, NULL, NULL,
531 .cpuid_eax = 0x80000007,
532 .cpuid_reg = R_EDX,
533 .tcg_features = TCG_APM_FEATURES,
534 .unmigratable_flags = CPUID_APM_INVTSC,
536 [FEAT_8000_0008_EBX] = {
537 .feat_names = {
538 NULL, NULL, NULL, NULL,
539 NULL, NULL, NULL, NULL,
540 NULL, NULL, NULL, NULL,
541 "ibpb", NULL, NULL, NULL,
542 NULL, NULL, NULL, NULL,
543 NULL, NULL, NULL, NULL,
544 NULL, NULL, NULL, NULL,
545 NULL, NULL, NULL, NULL,
547 .cpuid_eax = 0x80000008,
548 .cpuid_reg = R_EBX,
549 .tcg_features = 0,
550 .unmigratable_flags = 0,
552 [FEAT_XSAVE] = {
553 .feat_names = {
554 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
555 NULL, NULL, NULL, NULL,
556 NULL, NULL, NULL, NULL,
557 NULL, NULL, NULL, NULL,
558 NULL, NULL, NULL, NULL,
559 NULL, NULL, NULL, NULL,
560 NULL, NULL, NULL, NULL,
561 NULL, NULL, NULL, NULL,
563 .cpuid_eax = 0xd,
564 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
565 .cpuid_reg = R_EAX,
566 .tcg_features = TCG_XSAVE_FEATURES,
568 [FEAT_6_EAX] = {
569 .feat_names = {
570 NULL, NULL, "arat", NULL,
571 NULL, NULL, NULL, NULL,
572 NULL, NULL, NULL, NULL,
573 NULL, NULL, NULL, NULL,
574 NULL, NULL, NULL, NULL,
575 NULL, NULL, NULL, NULL,
576 NULL, NULL, NULL, NULL,
577 NULL, NULL, NULL, NULL,
579 .cpuid_eax = 6, .cpuid_reg = R_EAX,
580 .tcg_features = TCG_6_EAX_FEATURES,
582 [FEAT_XSAVE_COMP_LO] = {
583 .cpuid_eax = 0xD,
584 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
585 .cpuid_reg = R_EAX,
586 .tcg_features = ~0U,
587 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
588 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
589 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
590 XSTATE_PKRU_MASK,
592 [FEAT_XSAVE_COMP_HI] = {
593 .cpuid_eax = 0xD,
594 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
595 .cpuid_reg = R_EDX,
596 .tcg_features = ~0U,
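/*
 * Note on FEAT_KVM_HINTS above (illustrative): with no_autoenable_flags set to
 * ~0U, hint bits such as "kvm-hint-dedicated" are never switched on implicitly
 * by "-cpu host"; they must be requested explicitly, e.g. with something like
 * "-cpu host,kvm-hint-dedicated=on", which is only sensible when each vCPU has
 * a dedicated host CPU.
 */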
600 typedef struct X86RegisterInfo32 {
601 /* Name of register */
602 const char *name;
603 /* QAPI enum value register */
604 X86CPURegister32 qapi_enum;
605 } X86RegisterInfo32;
607 #define REGISTER(reg) \
608 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
609 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
610 REGISTER(EAX),
611 REGISTER(ECX),
612 REGISTER(EDX),
613 REGISTER(EBX),
614 REGISTER(ESP),
615 REGISTER(EBP),
616 REGISTER(ESI),
617 REGISTER(EDI),
619 #undef REGISTER
621 typedef struct ExtSaveArea {
622 uint32_t feature, bits;
623 uint32_t offset, size;
624 } ExtSaveArea;
626 static const ExtSaveArea x86_ext_save_areas[] = {
627 [XSTATE_FP_BIT] = {
628 /* x87 FP state component is always enabled if XSAVE is supported */
629 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
630 /* x87 state is in the legacy region of the XSAVE area */
631 .offset = 0,
632 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
634 [XSTATE_SSE_BIT] = {
635 /* SSE state component is always enabled if XSAVE is supported */
636 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
637 /* SSE state is in the legacy region of the XSAVE area */
638 .offset = 0,
639 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
641 [XSTATE_YMM_BIT] =
642 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
643 .offset = offsetof(X86XSaveArea, avx_state),
644 .size = sizeof(XSaveAVX) },
645 [XSTATE_BNDREGS_BIT] =
646 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
647 .offset = offsetof(X86XSaveArea, bndreg_state),
648 .size = sizeof(XSaveBNDREG) },
649 [XSTATE_BNDCSR_BIT] =
650 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
651 .offset = offsetof(X86XSaveArea, bndcsr_state),
652 .size = sizeof(XSaveBNDCSR) },
653 [XSTATE_OPMASK_BIT] =
654 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
655 .offset = offsetof(X86XSaveArea, opmask_state),
656 .size = sizeof(XSaveOpmask) },
657 [XSTATE_ZMM_Hi256_BIT] =
658 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
659 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
660 .size = sizeof(XSaveZMM_Hi256) },
661 [XSTATE_Hi16_ZMM_BIT] =
662 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
663 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
664 .size = sizeof(XSaveHi16_ZMM) },
665 [XSTATE_PKRU_BIT] =
666 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
667 .offset = offsetof(X86XSaveArea, pkru_state),
668 .size = sizeof(XSavePKRU) },
671 static uint32_t xsave_area_size(uint64_t mask)
673 int i;
674 uint64_t ret = 0;
676 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
677 const ExtSaveArea *esa = &x86_ext_save_areas[i];
678 if ((mask >> i) & 1) {
679 ret = MAX(ret, esa->offset + esa->size);
682 return ret;
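/*
 * Illustrative consequence: each entry above records where its component ends
 * (offset + size), so the loop returns the highest end covered by the mask.
 * For XSTATE_FP_MASK | XSTATE_SSE_MASK both components sit at offset 0 and the
 * result is just sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader); adding
 * XSTATE_YMM_MASK extends it to the end of avx_state, and so on.
 */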
685 static inline bool accel_uses_host_cpuid(void)
687 return kvm_enabled() || hvf_enabled();
690 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
692 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
693 cpu->env.features[FEAT_XSAVE_COMP_LO];
696 const char *get_register_name_32(unsigned int reg)
698 if (reg >= CPU_NB_REGS32) {
699 return NULL;
701 return x86_reg_info_32[reg].name;
704 /*
705  * Returns the set of feature flags that are supported and migratable by
706  * QEMU, for a given FeatureWord.
707  */
708 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
710 FeatureWordInfo *wi = &feature_word_info[w];
711 uint32_t r = 0;
712 int i;
714 for (i = 0; i < 32; i++) {
715 uint32_t f = 1U << i;
717 /* If the feature name is known, it is implicitly considered migratable,
718 * unless it is explicitly set in unmigratable_flags */
719 if ((wi->migratable_flags & f) ||
720 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
721 r |= f;
724 return r;
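/*
 * Illustrative example: "invtsc" has a name in FEAT_8000_0007_EDX but is also
 * listed in that word's unmigratable_flags, so it is filtered out here and is
 * not offered as part of a migratable "-cpu host" feature set.
 */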
727 void host_cpuid(uint32_t function, uint32_t count,
728 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
730 uint32_t vec[4];
732 #ifdef __x86_64__
733 asm volatile("cpuid"
734 : "=a"(vec[0]), "=b"(vec[1]),
735 "=c"(vec[2]), "=d"(vec[3])
736 : "0"(function), "c"(count) : "cc");
737 #elif defined(__i386__)
738 asm volatile("pusha \n\t"
739 "cpuid \n\t"
740 "mov %%eax, 0(%2) \n\t"
741 "mov %%ebx, 4(%2) \n\t"
742 "mov %%ecx, 8(%2) \n\t"
743 "mov %%edx, 12(%2) \n\t"
744 "popa"
745 : : "a"(function), "c"(count), "S"(vec)
746 : "memory", "cc");
747 #else
748 abort();
749 #endif
751 if (eax)
752 *eax = vec[0];
753 if (ebx)
754 *ebx = vec[1];
755 if (ecx)
756 *ecx = vec[2];
757 if (edx)
758 *edx = vec[3];
761 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
763 uint32_t eax, ebx, ecx, edx;
765 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
766 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
768 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
769 if (family) {
770 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
772 if (model) {
773 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
775 if (stepping) {
776 *stepping = eax & 0x0F;
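/*
 * Worked example (illustrative): CPUID.1 EAX = 0x000306c3 decodes as
 * family 0x6 + extended family 0x0 = 6, model 0xc | (0x3 << 4) = 60 and
 * stepping 3, i.e. the family 6 / model 60 Haswell parts listed further below.
 */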
780 /* CPU class name definitions: */
782 /* Return type name for a given CPU model name
783  * Caller is responsible for freeing the returned string.
784  */
785 static char *x86_cpu_type_name(const char *model_name)
787 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
790 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
792 ObjectClass *oc;
793 char *typename = x86_cpu_type_name(cpu_model);
794 oc = object_class_by_name(typename);
795 g_free(typename);
796 return oc;
799 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
801 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
802 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
803 return g_strndup(class_name,
804 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
807 struct X86CPUDefinition {
808 const char *name;
809 uint32_t level;
810 uint32_t xlevel;
811 /* vendor is zero-terminated, 12 character ASCII string */
812 char vendor[CPUID_VENDOR_SZ + 1];
813 int family;
814 int model;
815 int stepping;
816 FeatureWordArray features;
817 const char *model_id;
820 static X86CPUDefinition builtin_x86_defs[] = {
822 .name = "qemu64",
823 .level = 0xd,
824 .vendor = CPUID_VENDOR_AMD,
825 .family = 6,
826 .model = 6,
827 .stepping = 3,
828 .features[FEAT_1_EDX] =
829 PPRO_FEATURES |
830 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
831 CPUID_PSE36,
832 .features[FEAT_1_ECX] =
833 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
834 .features[FEAT_8000_0001_EDX] =
835 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
836 .features[FEAT_8000_0001_ECX] =
837 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
838 .xlevel = 0x8000000A,
839 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
842 .name = "phenom",
843 .level = 5,
844 .vendor = CPUID_VENDOR_AMD,
845 .family = 16,
846 .model = 2,
847 .stepping = 3,
848 /* Missing: CPUID_HT */
849 .features[FEAT_1_EDX] =
850 PPRO_FEATURES |
851 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
852 CPUID_PSE36 | CPUID_VME,
853 .features[FEAT_1_ECX] =
854 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
855 CPUID_EXT_POPCNT,
856 .features[FEAT_8000_0001_EDX] =
857 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
858 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
859 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
860 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
861 CPUID_EXT3_CR8LEG,
862 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
863 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
864 .features[FEAT_8000_0001_ECX] =
865 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
866 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
867 /* Missing: CPUID_SVM_LBRV */
868 .features[FEAT_SVM] =
869 CPUID_SVM_NPT,
870 .xlevel = 0x8000001A,
871 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
874 .name = "core2duo",
875 .level = 10,
876 .vendor = CPUID_VENDOR_INTEL,
877 .family = 6,
878 .model = 15,
879 .stepping = 11,
880 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
881 .features[FEAT_1_EDX] =
882 PPRO_FEATURES |
883 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
884 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
885 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
886 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
887 .features[FEAT_1_ECX] =
888 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
889 CPUID_EXT_CX16,
890 .features[FEAT_8000_0001_EDX] =
891 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
892 .features[FEAT_8000_0001_ECX] =
893 CPUID_EXT3_LAHF_LM,
894 .xlevel = 0x80000008,
895 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
898 .name = "kvm64",
899 .level = 0xd,
900 .vendor = CPUID_VENDOR_INTEL,
901 .family = 15,
902 .model = 6,
903 .stepping = 1,
904 /* Missing: CPUID_HT */
905 .features[FEAT_1_EDX] =
906 PPRO_FEATURES | CPUID_VME |
907 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
908 CPUID_PSE36,
909 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
910 .features[FEAT_1_ECX] =
911 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
912 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
913 .features[FEAT_8000_0001_EDX] =
914 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
915 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
916 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
917 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
918 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
919 .features[FEAT_8000_0001_ECX] =
921 .xlevel = 0x80000008,
922 .model_id = "Common KVM processor"
925 .name = "qemu32",
926 .level = 4,
927 .vendor = CPUID_VENDOR_INTEL,
928 .family = 6,
929 .model = 6,
930 .stepping = 3,
931 .features[FEAT_1_EDX] =
932 PPRO_FEATURES,
933 .features[FEAT_1_ECX] =
934 CPUID_EXT_SSE3,
935 .xlevel = 0x80000004,
936 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
939 .name = "kvm32",
940 .level = 5,
941 .vendor = CPUID_VENDOR_INTEL,
942 .family = 15,
943 .model = 6,
944 .stepping = 1,
945 .features[FEAT_1_EDX] =
946 PPRO_FEATURES | CPUID_VME |
947 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
948 .features[FEAT_1_ECX] =
949 CPUID_EXT_SSE3,
950 .features[FEAT_8000_0001_ECX] =
952 .xlevel = 0x80000008,
953 .model_id = "Common 32-bit KVM processor"
956 .name = "coreduo",
957 .level = 10,
958 .vendor = CPUID_VENDOR_INTEL,
959 .family = 6,
960 .model = 14,
961 .stepping = 8,
962 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
963 .features[FEAT_1_EDX] =
964 PPRO_FEATURES | CPUID_VME |
965 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
966 CPUID_SS,
967 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
968 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
969 .features[FEAT_1_ECX] =
970 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
971 .features[FEAT_8000_0001_EDX] =
972 CPUID_EXT2_NX,
973 .xlevel = 0x80000008,
974 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
977 .name = "486",
978 .level = 1,
979 .vendor = CPUID_VENDOR_INTEL,
980 .family = 4,
981 .model = 8,
982 .stepping = 0,
983 .features[FEAT_1_EDX] =
984 I486_FEATURES,
985 .xlevel = 0,
986 .model_id = "",
989 .name = "pentium",
990 .level = 1,
991 .vendor = CPUID_VENDOR_INTEL,
992 .family = 5,
993 .model = 4,
994 .stepping = 3,
995 .features[FEAT_1_EDX] =
996 PENTIUM_FEATURES,
997 .xlevel = 0,
998 .model_id = "",
1001 .name = "pentium2",
1002 .level = 2,
1003 .vendor = CPUID_VENDOR_INTEL,
1004 .family = 6,
1005 .model = 5,
1006 .stepping = 2,
1007 .features[FEAT_1_EDX] =
1008 PENTIUM2_FEATURES,
1009 .xlevel = 0,
1010 .model_id = "",
1013 .name = "pentium3",
1014 .level = 3,
1015 .vendor = CPUID_VENDOR_INTEL,
1016 .family = 6,
1017 .model = 7,
1018 .stepping = 3,
1019 .features[FEAT_1_EDX] =
1020 PENTIUM3_FEATURES,
1021 .xlevel = 0,
1022 .model_id = "",
1025 .name = "athlon",
1026 .level = 2,
1027 .vendor = CPUID_VENDOR_AMD,
1028 .family = 6,
1029 .model = 2,
1030 .stepping = 3,
1031 .features[FEAT_1_EDX] =
1032 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1033 CPUID_MCA,
1034 .features[FEAT_8000_0001_EDX] =
1035 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1036 .xlevel = 0x80000008,
1037 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1040 .name = "n270",
1041 .level = 10,
1042 .vendor = CPUID_VENDOR_INTEL,
1043 .family = 6,
1044 .model = 28,
1045 .stepping = 2,
1046 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1047 .features[FEAT_1_EDX] =
1048 PPRO_FEATURES |
1049 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1050 CPUID_ACPI | CPUID_SS,
1051 /* Some CPUs got no CPUID_SEP */
1052 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1053 * CPUID_EXT_XTPR */
1054 .features[FEAT_1_ECX] =
1055 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1056 CPUID_EXT_MOVBE,
1057 .features[FEAT_8000_0001_EDX] =
1058 CPUID_EXT2_NX,
1059 .features[FEAT_8000_0001_ECX] =
1060 CPUID_EXT3_LAHF_LM,
1061 .xlevel = 0x80000008,
1062 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1065 .name = "Conroe",
1066 .level = 10,
1067 .vendor = CPUID_VENDOR_INTEL,
1068 .family = 6,
1069 .model = 15,
1070 .stepping = 3,
1071 .features[FEAT_1_EDX] =
1072 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1073 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1074 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1075 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1076 CPUID_DE | CPUID_FP87,
1077 .features[FEAT_1_ECX] =
1078 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1079 .features[FEAT_8000_0001_EDX] =
1080 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1081 .features[FEAT_8000_0001_ECX] =
1082 CPUID_EXT3_LAHF_LM,
1083 .xlevel = 0x80000008,
1084 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1087 .name = "Penryn",
1088 .level = 10,
1089 .vendor = CPUID_VENDOR_INTEL,
1090 .family = 6,
1091 .model = 23,
1092 .stepping = 3,
1093 .features[FEAT_1_EDX] =
1094 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1095 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1096 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1097 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1098 CPUID_DE | CPUID_FP87,
1099 .features[FEAT_1_ECX] =
1100 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1101 CPUID_EXT_SSE3,
1102 .features[FEAT_8000_0001_EDX] =
1103 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1104 .features[FEAT_8000_0001_ECX] =
1105 CPUID_EXT3_LAHF_LM,
1106 .xlevel = 0x80000008,
1107 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1110 .name = "Nehalem",
1111 .level = 11,
1112 .vendor = CPUID_VENDOR_INTEL,
1113 .family = 6,
1114 .model = 26,
1115 .stepping = 3,
1116 .features[FEAT_1_EDX] =
1117 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1118 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1119 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1120 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1121 CPUID_DE | CPUID_FP87,
1122 .features[FEAT_1_ECX] =
1123 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1124 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1125 .features[FEAT_8000_0001_EDX] =
1126 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1127 .features[FEAT_8000_0001_ECX] =
1128 CPUID_EXT3_LAHF_LM,
1129 .xlevel = 0x80000008,
1130 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1133 .name = "Nehalem-IBRS",
1134 .level = 11,
1135 .vendor = CPUID_VENDOR_INTEL,
1136 .family = 6,
1137 .model = 26,
1138 .stepping = 3,
1139 .features[FEAT_1_EDX] =
1140 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1141 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1142 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1143 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1144 CPUID_DE | CPUID_FP87,
1145 .features[FEAT_1_ECX] =
1146 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1147 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1148 .features[FEAT_7_0_EDX] =
1149 CPUID_7_0_EDX_SPEC_CTRL,
1150 .features[FEAT_8000_0001_EDX] =
1151 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1152 .features[FEAT_8000_0001_ECX] =
1153 CPUID_EXT3_LAHF_LM,
1154 .xlevel = 0x80000008,
1155 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1158 .name = "Westmere",
1159 .level = 11,
1160 .vendor = CPUID_VENDOR_INTEL,
1161 .family = 6,
1162 .model = 44,
1163 .stepping = 1,
1164 .features[FEAT_1_EDX] =
1165 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1166 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1167 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1168 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1169 CPUID_DE | CPUID_FP87,
1170 .features[FEAT_1_ECX] =
1171 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1172 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1173 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1174 .features[FEAT_8000_0001_EDX] =
1175 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1176 .features[FEAT_8000_0001_ECX] =
1177 CPUID_EXT3_LAHF_LM,
1178 .features[FEAT_6_EAX] =
1179 CPUID_6_EAX_ARAT,
1180 .xlevel = 0x80000008,
1181 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1184 .name = "Westmere-IBRS",
1185 .level = 11,
1186 .vendor = CPUID_VENDOR_INTEL,
1187 .family = 6,
1188 .model = 44,
1189 .stepping = 1,
1190 .features[FEAT_1_EDX] =
1191 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1192 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1193 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1194 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1195 CPUID_DE | CPUID_FP87,
1196 .features[FEAT_1_ECX] =
1197 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1198 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1199 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1200 .features[FEAT_8000_0001_EDX] =
1201 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1202 .features[FEAT_8000_0001_ECX] =
1203 CPUID_EXT3_LAHF_LM,
1204 .features[FEAT_7_0_EDX] =
1205 CPUID_7_0_EDX_SPEC_CTRL,
1206 .features[FEAT_6_EAX] =
1207 CPUID_6_EAX_ARAT,
1208 .xlevel = 0x80000008,
1209 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1212 .name = "SandyBridge",
1213 .level = 0xd,
1214 .vendor = CPUID_VENDOR_INTEL,
1215 .family = 6,
1216 .model = 42,
1217 .stepping = 1,
1218 .features[FEAT_1_EDX] =
1219 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1220 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1221 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1222 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1223 CPUID_DE | CPUID_FP87,
1224 .features[FEAT_1_ECX] =
1225 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1226 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1227 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1228 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1229 CPUID_EXT_SSE3,
1230 .features[FEAT_8000_0001_EDX] =
1231 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1232 CPUID_EXT2_SYSCALL,
1233 .features[FEAT_8000_0001_ECX] =
1234 CPUID_EXT3_LAHF_LM,
1235 .features[FEAT_XSAVE] =
1236 CPUID_XSAVE_XSAVEOPT,
1237 .features[FEAT_6_EAX] =
1238 CPUID_6_EAX_ARAT,
1239 .xlevel = 0x80000008,
1240 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1243 .name = "SandyBridge-IBRS",
1244 .level = 0xd,
1245 .vendor = CPUID_VENDOR_INTEL,
1246 .family = 6,
1247 .model = 42,
1248 .stepping = 1,
1249 .features[FEAT_1_EDX] =
1250 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1251 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1252 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1253 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1254 CPUID_DE | CPUID_FP87,
1255 .features[FEAT_1_ECX] =
1256 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1257 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1258 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1259 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1260 CPUID_EXT_SSE3,
1261 .features[FEAT_8000_0001_EDX] =
1262 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1263 CPUID_EXT2_SYSCALL,
1264 .features[FEAT_8000_0001_ECX] =
1265 CPUID_EXT3_LAHF_LM,
1266 .features[FEAT_7_0_EDX] =
1267 CPUID_7_0_EDX_SPEC_CTRL,
1268 .features[FEAT_XSAVE] =
1269 CPUID_XSAVE_XSAVEOPT,
1270 .features[FEAT_6_EAX] =
1271 CPUID_6_EAX_ARAT,
1272 .xlevel = 0x80000008,
1273 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1276 .name = "IvyBridge",
1277 .level = 0xd,
1278 .vendor = CPUID_VENDOR_INTEL,
1279 .family = 6,
1280 .model = 58,
1281 .stepping = 9,
1282 .features[FEAT_1_EDX] =
1283 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1284 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1285 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1286 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1287 CPUID_DE | CPUID_FP87,
1288 .features[FEAT_1_ECX] =
1289 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1290 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1291 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1292 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1293 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1294 .features[FEAT_7_0_EBX] =
1295 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1296 CPUID_7_0_EBX_ERMS,
1297 .features[FEAT_8000_0001_EDX] =
1298 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1299 CPUID_EXT2_SYSCALL,
1300 .features[FEAT_8000_0001_ECX] =
1301 CPUID_EXT3_LAHF_LM,
1302 .features[FEAT_XSAVE] =
1303 CPUID_XSAVE_XSAVEOPT,
1304 .features[FEAT_6_EAX] =
1305 CPUID_6_EAX_ARAT,
1306 .xlevel = 0x80000008,
1307 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1310 .name = "IvyBridge-IBRS",
1311 .level = 0xd,
1312 .vendor = CPUID_VENDOR_INTEL,
1313 .family = 6,
1314 .model = 58,
1315 .stepping = 9,
1316 .features[FEAT_1_EDX] =
1317 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1318 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1319 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1320 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1321 CPUID_DE | CPUID_FP87,
1322 .features[FEAT_1_ECX] =
1323 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1324 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1325 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1326 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1327 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1328 .features[FEAT_7_0_EBX] =
1329 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1330 CPUID_7_0_EBX_ERMS,
1331 .features[FEAT_8000_0001_EDX] =
1332 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1333 CPUID_EXT2_SYSCALL,
1334 .features[FEAT_8000_0001_ECX] =
1335 CPUID_EXT3_LAHF_LM,
1336 .features[FEAT_7_0_EDX] =
1337 CPUID_7_0_EDX_SPEC_CTRL,
1338 .features[FEAT_XSAVE] =
1339 CPUID_XSAVE_XSAVEOPT,
1340 .features[FEAT_6_EAX] =
1341 CPUID_6_EAX_ARAT,
1342 .xlevel = 0x80000008,
1343 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1346 .name = "Haswell-noTSX",
1347 .level = 0xd,
1348 .vendor = CPUID_VENDOR_INTEL,
1349 .family = 6,
1350 .model = 60,
1351 .stepping = 1,
1352 .features[FEAT_1_EDX] =
1353 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1354 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1355 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1356 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1357 CPUID_DE | CPUID_FP87,
1358 .features[FEAT_1_ECX] =
1359 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1360 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1361 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1362 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1363 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1364 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1365 .features[FEAT_8000_0001_EDX] =
1366 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1367 CPUID_EXT2_SYSCALL,
1368 .features[FEAT_8000_0001_ECX] =
1369 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1370 .features[FEAT_7_0_EBX] =
1371 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1372 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1373 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1374 .features[FEAT_XSAVE] =
1375 CPUID_XSAVE_XSAVEOPT,
1376 .features[FEAT_6_EAX] =
1377 CPUID_6_EAX_ARAT,
1378 .xlevel = 0x80000008,
1379 .model_id = "Intel Core Processor (Haswell, no TSX)",
1382 .name = "Haswell-noTSX-IBRS",
1383 .level = 0xd,
1384 .vendor = CPUID_VENDOR_INTEL,
1385 .family = 6,
1386 .model = 60,
1387 .stepping = 1,
1388 .features[FEAT_1_EDX] =
1389 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1390 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1391 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1392 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1393 CPUID_DE | CPUID_FP87,
1394 .features[FEAT_1_ECX] =
1395 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1396 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1397 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1398 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1399 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1400 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1401 .features[FEAT_8000_0001_EDX] =
1402 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1403 CPUID_EXT2_SYSCALL,
1404 .features[FEAT_8000_0001_ECX] =
1405 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1406 .features[FEAT_7_0_EDX] =
1407 CPUID_7_0_EDX_SPEC_CTRL,
1408 .features[FEAT_7_0_EBX] =
1409 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1410 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1411 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1412 .features[FEAT_XSAVE] =
1413 CPUID_XSAVE_XSAVEOPT,
1414 .features[FEAT_6_EAX] =
1415 CPUID_6_EAX_ARAT,
1416 .xlevel = 0x80000008,
1417 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1420 .name = "Haswell",
1421 .level = 0xd,
1422 .vendor = CPUID_VENDOR_INTEL,
1423 .family = 6,
1424 .model = 60,
1425 .stepping = 4,
1426 .features[FEAT_1_EDX] =
1427 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1428 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1429 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1430 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1431 CPUID_DE | CPUID_FP87,
1432 .features[FEAT_1_ECX] =
1433 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1434 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1435 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1436 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1437 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1438 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1439 .features[FEAT_8000_0001_EDX] =
1440 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1441 CPUID_EXT2_SYSCALL,
1442 .features[FEAT_8000_0001_ECX] =
1443 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1444 .features[FEAT_7_0_EBX] =
1445 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1446 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1447 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1448 CPUID_7_0_EBX_RTM,
1449 .features[FEAT_XSAVE] =
1450 CPUID_XSAVE_XSAVEOPT,
1451 .features[FEAT_6_EAX] =
1452 CPUID_6_EAX_ARAT,
1453 .xlevel = 0x80000008,
1454 .model_id = "Intel Core Processor (Haswell)",
1457 .name = "Haswell-IBRS",
1458 .level = 0xd,
1459 .vendor = CPUID_VENDOR_INTEL,
1460 .family = 6,
1461 .model = 60,
1462 .stepping = 4,
1463 .features[FEAT_1_EDX] =
1464 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1465 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1466 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1467 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1468 CPUID_DE | CPUID_FP87,
1469 .features[FEAT_1_ECX] =
1470 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1471 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1472 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1473 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1474 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1475 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1476 .features[FEAT_8000_0001_EDX] =
1477 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1478 CPUID_EXT2_SYSCALL,
1479 .features[FEAT_8000_0001_ECX] =
1480 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1481 .features[FEAT_7_0_EDX] =
1482 CPUID_7_0_EDX_SPEC_CTRL,
1483 .features[FEAT_7_0_EBX] =
1484 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1485 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1486 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1487 CPUID_7_0_EBX_RTM,
1488 .features[FEAT_XSAVE] =
1489 CPUID_XSAVE_XSAVEOPT,
1490 .features[FEAT_6_EAX] =
1491 CPUID_6_EAX_ARAT,
1492 .xlevel = 0x80000008,
1493 .model_id = "Intel Core Processor (Haswell, IBRS)",
1496 .name = "Broadwell-noTSX",
1497 .level = 0xd,
1498 .vendor = CPUID_VENDOR_INTEL,
1499 .family = 6,
1500 .model = 61,
1501 .stepping = 2,
1502 .features[FEAT_1_EDX] =
1503 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1504 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1505 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1506 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1507 CPUID_DE | CPUID_FP87,
1508 .features[FEAT_1_ECX] =
1509 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1510 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1511 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1512 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1513 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1514 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1515 .features[FEAT_8000_0001_EDX] =
1516 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1517 CPUID_EXT2_SYSCALL,
1518 .features[FEAT_8000_0001_ECX] =
1519 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1520 .features[FEAT_7_0_EBX] =
1521 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1522 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1523 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1524 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1525 CPUID_7_0_EBX_SMAP,
1526 .features[FEAT_XSAVE] =
1527 CPUID_XSAVE_XSAVEOPT,
1528 .features[FEAT_6_EAX] =
1529 CPUID_6_EAX_ARAT,
1530 .xlevel = 0x80000008,
1531 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1534 .name = "Broadwell-noTSX-IBRS",
1535 .level = 0xd,
1536 .vendor = CPUID_VENDOR_INTEL,
1537 .family = 6,
1538 .model = 61,
1539 .stepping = 2,
1540 .features[FEAT_1_EDX] =
1541 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1542 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1543 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1544 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1545 CPUID_DE | CPUID_FP87,
1546 .features[FEAT_1_ECX] =
1547 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1548 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1549 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1550 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1551 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1552 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1553 .features[FEAT_8000_0001_EDX] =
1554 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1555 CPUID_EXT2_SYSCALL,
1556 .features[FEAT_8000_0001_ECX] =
1557 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1558 .features[FEAT_7_0_EDX] =
1559 CPUID_7_0_EDX_SPEC_CTRL,
1560 .features[FEAT_7_0_EBX] =
1561 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1562 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1563 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1564 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1565 CPUID_7_0_EBX_SMAP,
1566 .features[FEAT_XSAVE] =
1567 CPUID_XSAVE_XSAVEOPT,
1568 .features[FEAT_6_EAX] =
1569 CPUID_6_EAX_ARAT,
1570 .xlevel = 0x80000008,
1571 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
1574 .name = "Broadwell",
1575 .level = 0xd,
1576 .vendor = CPUID_VENDOR_INTEL,
1577 .family = 6,
1578 .model = 61,
1579 .stepping = 2,
1580 .features[FEAT_1_EDX] =
1581 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1582 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1583 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1584 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1585 CPUID_DE | CPUID_FP87,
1586 .features[FEAT_1_ECX] =
1587 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1588 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1589 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1590 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1591 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1592 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1593 .features[FEAT_8000_0001_EDX] =
1594 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1595 CPUID_EXT2_SYSCALL,
1596 .features[FEAT_8000_0001_ECX] =
1597 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1598 .features[FEAT_7_0_EBX] =
1599 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1600 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1601 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1602 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1603 CPUID_7_0_EBX_SMAP,
1604 .features[FEAT_XSAVE] =
1605 CPUID_XSAVE_XSAVEOPT,
1606 .features[FEAT_6_EAX] =
1607 CPUID_6_EAX_ARAT,
1608 .xlevel = 0x80000008,
1609 .model_id = "Intel Core Processor (Broadwell)",
1612 .name = "Broadwell-IBRS",
1613 .level = 0xd,
1614 .vendor = CPUID_VENDOR_INTEL,
1615 .family = 6,
1616 .model = 61,
1617 .stepping = 2,
1618 .features[FEAT_1_EDX] =
1619 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1620 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1621 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1622 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1623 CPUID_DE | CPUID_FP87,
1624 .features[FEAT_1_ECX] =
1625 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1626 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1627 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1628 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1629 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1630 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1631 .features[FEAT_8000_0001_EDX] =
1632 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1633 CPUID_EXT2_SYSCALL,
1634 .features[FEAT_8000_0001_ECX] =
1635 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1636 .features[FEAT_7_0_EDX] =
1637 CPUID_7_0_EDX_SPEC_CTRL,
1638 .features[FEAT_7_0_EBX] =
1639 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1640 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1641 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1642 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1643 CPUID_7_0_EBX_SMAP,
1644 .features[FEAT_XSAVE] =
1645 CPUID_XSAVE_XSAVEOPT,
1646 .features[FEAT_6_EAX] =
1647 CPUID_6_EAX_ARAT,
1648 .xlevel = 0x80000008,
1649 .model_id = "Intel Core Processor (Broadwell, IBRS)",
1652 .name = "Skylake-Client",
1653 .level = 0xd,
1654 .vendor = CPUID_VENDOR_INTEL,
1655 .family = 6,
1656 .model = 94,
1657 .stepping = 3,
1658 .features[FEAT_1_EDX] =
1659 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1660 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1661 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1662 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1663 CPUID_DE | CPUID_FP87,
1664 .features[FEAT_1_ECX] =
1665 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1666 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1667 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1668 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1669 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1670 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1671 .features[FEAT_8000_0001_EDX] =
1672 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1673 CPUID_EXT2_SYSCALL,
1674 .features[FEAT_8000_0001_ECX] =
1675 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1676 .features[FEAT_7_0_EBX] =
1677 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1678 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1679 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1680 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1681 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1682 /* Missing: XSAVES (not supported by some Linux versions,
1683 * including v4.1 to v4.12).
1684 * KVM doesn't yet expose any XSAVES state save component,
1685 * and the only one defined in Skylake (processor tracing)
1686 * probably will block migration anyway.
1688 .features[FEAT_XSAVE] =
1689 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1690 CPUID_XSAVE_XGETBV1,
1691 .features[FEAT_6_EAX] =
1692 CPUID_6_EAX_ARAT,
1693 .xlevel = 0x80000008,
1694 .model_id = "Intel Core Processor (Skylake)",
1697 .name = "Skylake-Client-IBRS",
1698 .level = 0xd,
1699 .vendor = CPUID_VENDOR_INTEL,
1700 .family = 6,
1701 .model = 94,
1702 .stepping = 3,
1703 .features[FEAT_1_EDX] =
1704 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1705 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1706 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1707 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1708 CPUID_DE | CPUID_FP87,
1709 .features[FEAT_1_ECX] =
1710 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1711 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1712 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1713 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1714 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1715 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1716 .features[FEAT_8000_0001_EDX] =
1717 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1718 CPUID_EXT2_SYSCALL,
1719 .features[FEAT_8000_0001_ECX] =
1720 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1721 .features[FEAT_7_0_EDX] =
1722 CPUID_7_0_EDX_SPEC_CTRL,
1723 .features[FEAT_7_0_EBX] =
1724 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1725 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1726 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1727 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1728 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1729 /* Missing: XSAVES (not supported by some Linux versions,
1730 * including v4.1 to v4.12).
1731 * KVM doesn't yet expose any XSAVES state save component,
1732 * and the only one defined in Skylake (processor tracing)
1733 * probably will block migration anyway.
1735 .features[FEAT_XSAVE] =
1736 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1737 CPUID_XSAVE_XGETBV1,
1738 .features[FEAT_6_EAX] =
1739 CPUID_6_EAX_ARAT,
1740 .xlevel = 0x80000008,
1741 .model_id = "Intel Core Processor (Skylake, IBRS)",
1744 .name = "Skylake-Server",
1745 .level = 0xd,
1746 .vendor = CPUID_VENDOR_INTEL,
1747 .family = 6,
1748 .model = 85,
1749 .stepping = 4,
1750 .features[FEAT_1_EDX] =
1751 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1752 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1753 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1754 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1755 CPUID_DE | CPUID_FP87,
1756 .features[FEAT_1_ECX] =
1757 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1758 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1759 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1760 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1761 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1762 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1763 .features[FEAT_8000_0001_EDX] =
1764 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1765 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1766 .features[FEAT_8000_0001_ECX] =
1767 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1768 .features[FEAT_7_0_EBX] =
1769 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1770 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1771 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1772 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1773 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1774 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1775 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1776 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
1777 /* Missing: XSAVES (not supported by some Linux versions,
1778 * including v4.1 to v4.12).
1779 * KVM doesn't yet expose any XSAVES state save component,
1780 * and the only one defined in Skylake (processor tracing)
1781 * probably will block migration anyway.
1783 .features[FEAT_XSAVE] =
1784 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1785 CPUID_XSAVE_XGETBV1,
1786 .features[FEAT_6_EAX] =
1787 CPUID_6_EAX_ARAT,
1788 .xlevel = 0x80000008,
1789 .model_id = "Intel Xeon Processor (Skylake)",
1792 .name = "Skylake-Server-IBRS",
1793 .level = 0xd,
1794 .vendor = CPUID_VENDOR_INTEL,
1795 .family = 6,
1796 .model = 85,
1797 .stepping = 4,
1798 .features[FEAT_1_EDX] =
1799 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1800 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1801 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1802 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1803 CPUID_DE | CPUID_FP87,
1804 .features[FEAT_1_ECX] =
1805 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1806 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1807 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1808 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1809 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1810 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1811 .features[FEAT_8000_0001_EDX] =
1812 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1813 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1814 .features[FEAT_8000_0001_ECX] =
1815 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1816 .features[FEAT_7_0_EDX] =
1817 CPUID_7_0_EDX_SPEC_CTRL,
1818 .features[FEAT_7_0_EBX] =
1819 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1820 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1821 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1822 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1823 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1824 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1825 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1826 CPUID_7_0_EBX_AVX512VL,
1827 /* Missing: XSAVES (not supported by some Linux versions,
1828 * including v4.1 to v4.12).
1829 * KVM doesn't yet expose any XSAVES state save component,
1830 * and the only one defined in Skylake (processor tracing)
1831 * probably will block migration anyway.
1833 .features[FEAT_XSAVE] =
1834 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1835 CPUID_XSAVE_XGETBV1,
1836 .features[FEAT_6_EAX] =
1837 CPUID_6_EAX_ARAT,
1838 .xlevel = 0x80000008,
1839 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
1842 .name = "Opteron_G1",
1843 .level = 5,
1844 .vendor = CPUID_VENDOR_AMD,
1845 .family = 15,
1846 .model = 6,
1847 .stepping = 1,
1848 .features[FEAT_1_EDX] =
1849 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1850 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1851 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1852 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1853 CPUID_DE | CPUID_FP87,
1854 .features[FEAT_1_ECX] =
1855 CPUID_EXT_SSE3,
1856 .features[FEAT_8000_0001_EDX] =
1857 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1858 .xlevel = 0x80000008,
1859 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1862 .name = "Opteron_G2",
1863 .level = 5,
1864 .vendor = CPUID_VENDOR_AMD,
1865 .family = 15,
1866 .model = 6,
1867 .stepping = 1,
1868 .features[FEAT_1_EDX] =
1869 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1870 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1871 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1872 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1873 CPUID_DE | CPUID_FP87,
1874 .features[FEAT_1_ECX] =
1875 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1876 /* Missing: CPUID_EXT2_RDTSCP */
1877 .features[FEAT_8000_0001_EDX] =
1878 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1879 .features[FEAT_8000_0001_ECX] =
1880 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1881 .xlevel = 0x80000008,
1882 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1885 .name = "Opteron_G3",
1886 .level = 5,
1887 .vendor = CPUID_VENDOR_AMD,
1888 .family = 16,
1889 .model = 2,
1890 .stepping = 3,
1891 .features[FEAT_1_EDX] =
1892 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1893 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1894 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1895 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1896 CPUID_DE | CPUID_FP87,
1897 .features[FEAT_1_ECX] =
1898 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1899 CPUID_EXT_SSE3,
1900 /* Missing: CPUID_EXT2_RDTSCP */
1901 .features[FEAT_8000_0001_EDX] =
1902 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1903 .features[FEAT_8000_0001_ECX] =
1904 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1905 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1906 .xlevel = 0x80000008,
1907 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1910 .name = "Opteron_G4",
1911 .level = 0xd,
1912 .vendor = CPUID_VENDOR_AMD,
1913 .family = 21,
1914 .model = 1,
1915 .stepping = 2,
1916 .features[FEAT_1_EDX] =
1917 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1918 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1919 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1920 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1921 CPUID_DE | CPUID_FP87,
1922 .features[FEAT_1_ECX] =
1923 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1924 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1925 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1926 CPUID_EXT_SSE3,
1927 /* Missing: CPUID_EXT2_RDTSCP */
1928 .features[FEAT_8000_0001_EDX] =
1929 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1930 CPUID_EXT2_SYSCALL,
1931 .features[FEAT_8000_0001_ECX] =
1932 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1933 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1934 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1935 CPUID_EXT3_LAHF_LM,
1936 /* no xsaveopt! */
1937 .xlevel = 0x8000001A,
1938 .model_id = "AMD Opteron 62xx class CPU",
1941 .name = "Opteron_G5",
1942 .level = 0xd,
1943 .vendor = CPUID_VENDOR_AMD,
1944 .family = 21,
1945 .model = 2,
1946 .stepping = 0,
1947 .features[FEAT_1_EDX] =
1948 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1949 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1950 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1951 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1952 CPUID_DE | CPUID_FP87,
1953 .features[FEAT_1_ECX] =
1954 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1955 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1956 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1957 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1958 /* Missing: CPUID_EXT2_RDTSCP */
1959 .features[FEAT_8000_0001_EDX] =
1960 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1961 CPUID_EXT2_SYSCALL,
1962 .features[FEAT_8000_0001_ECX] =
1963 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1964 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1965 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1966 CPUID_EXT3_LAHF_LM,
1967 /* no xsaveopt! */
1968 .xlevel = 0x8000001A,
1969 .model_id = "AMD Opteron 63xx class CPU",
1972 .name = "EPYC",
1973 .level = 0xd,
1974 .vendor = CPUID_VENDOR_AMD,
1975 .family = 23,
1976 .model = 1,
1977 .stepping = 2,
1978 .features[FEAT_1_EDX] =
1979 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
1980 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
1981 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
1982 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
1983 CPUID_VME | CPUID_FP87,
1984 .features[FEAT_1_ECX] =
1985 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
1986 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
1987 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1988 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
1989 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1990 .features[FEAT_8000_0001_EDX] =
1991 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
1992 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
1993 CPUID_EXT2_SYSCALL,
1994 .features[FEAT_8000_0001_ECX] =
1995 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
1996 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
1997 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1998 .features[FEAT_7_0_EBX] =
1999 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2000 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2001 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2002 CPUID_7_0_EBX_SHA_NI,
2003 /* Missing: XSAVES (not supported by some Linux versions,
2004 * including v4.1 to v4.12).
2005 * KVM doesn't yet expose any XSAVES state save component.
2007 .features[FEAT_XSAVE] =
2008 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2009 CPUID_XSAVE_XGETBV1,
2010 .features[FEAT_6_EAX] =
2011 CPUID_6_EAX_ARAT,
2012 .xlevel = 0x8000000A,
2013 .model_id = "AMD EPYC Processor",
2016 .name = "EPYC-IBPB",
2017 .level = 0xd,
2018 .vendor = CPUID_VENDOR_AMD,
2019 .family = 23,
2020 .model = 1,
2021 .stepping = 2,
2022 .features[FEAT_1_EDX] =
2023 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2024 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2025 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2026 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2027 CPUID_VME | CPUID_FP87,
2028 .features[FEAT_1_ECX] =
2029 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2030 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2031 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2032 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2033 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2034 .features[FEAT_8000_0001_EDX] =
2035 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2036 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2037 CPUID_EXT2_SYSCALL,
2038 .features[FEAT_8000_0001_ECX] =
2039 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2040 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2041 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2042 .features[FEAT_8000_0008_EBX] =
2043 CPUID_8000_0008_EBX_IBPB,
2044 .features[FEAT_7_0_EBX] =
2045 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2046 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2047 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2048 CPUID_7_0_EBX_SHA_NI,
2049 /* Missing: XSAVES (not supported by some Linux versions,
2050 * including v4.1 to v4.12).
2051 * KVM doesn't yet expose any XSAVES state save component.
2053 .features[FEAT_XSAVE] =
2054 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2055 CPUID_XSAVE_XGETBV1,
2056 .features[FEAT_6_EAX] =
2057 CPUID_6_EAX_ARAT,
2058 .xlevel = 0x8000000A,
2059 .model_id = "AMD EPYC Processor (with IBPB)",
2063 typedef struct PropValue {
2064 const char *prop, *value;
2065 } PropValue;
2067 /* KVM-specific features that are automatically added/removed
2068 * from all CPU models when KVM is enabled.
2070 static PropValue kvm_default_props[] = {
2071 { "kvmclock", "on" },
2072 { "kvm-nopiodelay", "on" },
2073 { "kvm-asyncpf", "on" },
2074 { "kvm-steal-time", "on" },
2075 { "kvm-pv-eoi", "on" },
2076 { "kvmclock-stable-bit", "on" },
2077 { "x2apic", "on" },
2078 { "acpi", "off" },
2079 { "monitor", "off" },
2080 { "svm", "off" },
2081 { NULL, NULL },
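/* Illustrative example: under KVM, "-cpu Broadwell" is loaded with the
 * defaults above applied on top of the model (kvmclock=on, x2apic=on,
 * svm=off and so on), while an explicit option such as
 * "-cpu Broadwell,kvm-steal-time=off" (hypothetical command line) is
 * applied later and therefore overrides the corresponding entry.
 */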
2084 /* TCG-specific defaults that override all CPU models when using TCG
2086 static PropValue tcg_default_props[] = {
2087 { "vme", "off" },
2088 { NULL, NULL },
2092 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2094 PropValue *pv;
2095 for (pv = kvm_default_props; pv->prop; pv++) {
2096 if (!strcmp(pv->prop, prop)) {
2097 pv->value = value;
2098 break;
2102 /* It is valid to call this function only for properties that
2103 * are already present in the kvm_default_props table.
2105 assert(pv->prop);
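/* Example caller: x86_cpu_load_def() below calls
 * x86_cpu_change_kvm_default("x2apic", "off") when no in-kernel irqchip
 * is available.  Passing a NULL value clears an entry, which
 * x86_cpu_apply_props() then skips, effectively removing that default.
 */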
2108 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2109 bool migratable_only);
2111 static bool lmce_supported(void)
2113 uint64_t mce_cap = 0;
2115 #ifdef CONFIG_KVM
2116 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2117 return false;
2119 #endif
2121 return !!(mce_cap & MCG_LMCE_P);
2124 #define CPUID_MODEL_ID_SZ 48
2127 * cpu_x86_fill_model_id:
2128 * Get CPUID model ID string from host CPU.
2130 * @str should have at least CPUID_MODEL_ID_SZ bytes
2132 * The function does NOT add a null terminator to the string
2133 * automatically.
2135 static int cpu_x86_fill_model_id(char *str)
2137 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2138 int i;
2140 for (i = 0; i < 3; i++) {
2141 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2142 memcpy(str + i * 16 + 0, &eax, 4);
2143 memcpy(str + i * 16 + 4, &ebx, 4);
2144 memcpy(str + i * 16 + 8, &ecx, 4);
2145 memcpy(str + i * 16 + 12, &edx, 4);
2147 return 0;
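/* Example: for a host whose brand string is
 * "Intel(R) Xeon(R) CPU @ 2.40GHz" (hypothetical), each of the three
 * leaves 0x80000002..0x80000004 contributes 16 bytes, EAX/EBX/ECX/EDX
 * in that order, filling str[0..47]; callers must terminate the buffer
 * themselves.
 */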
2150 static Property max_x86_cpu_properties[] = {
2151 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2152 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2153 DEFINE_PROP_END_OF_LIST()
2156 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2158 DeviceClass *dc = DEVICE_CLASS(oc);
2159 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2161 xcc->ordering = 9;
2163 xcc->model_description =
2164 "Enables all features supported by the accelerator in the current host";
2166 dc->props = max_x86_cpu_properties;
2169 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2171 static void max_x86_cpu_initfn(Object *obj)
2173 X86CPU *cpu = X86_CPU(obj);
2174 CPUX86State *env = &cpu->env;
2175 KVMState *s = kvm_state;
2177 /* We can't fill the features array here because we don't know yet if
2178 * "migratable" is true or false.
2180 cpu->max_features = true;
2182 if (accel_uses_host_cpuid()) {
2183 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2184 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2185 int family, model, stepping;
2186 X86CPUDefinition host_cpudef = { };
2187 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2189 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2190 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2192 host_vendor_fms(vendor, &family, &model, &stepping);
2194 cpu_x86_fill_model_id(model_id);
2196 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2197 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2198 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2199 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2200 &error_abort);
2201 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2202 &error_abort);
2204 if (kvm_enabled()) {
2205 env->cpuid_min_level =
2206 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2207 env->cpuid_min_xlevel =
2208 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2209 env->cpuid_min_xlevel2 =
2210 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2211 } else {
2212 env->cpuid_min_level =
2213 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2214 env->cpuid_min_xlevel =
2215 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2216 env->cpuid_min_xlevel2 =
2217 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2220 if (lmce_supported()) {
2221 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2223 } else {
2224 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2225 "vendor", &error_abort);
2226 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2227 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2228 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2229 object_property_set_str(OBJECT(cpu),
2230 "QEMU TCG CPU version " QEMU_HW_VERSION,
2231 "model-id", &error_abort);
2234 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
2237 static const TypeInfo max_x86_cpu_type_info = {
2238 .name = X86_CPU_TYPE_NAME("max"),
2239 .parent = TYPE_X86_CPU,
2240 .instance_init = max_x86_cpu_initfn,
2241 .class_init = max_x86_cpu_class_init,
2244 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2245 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2247 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2249 xcc->host_cpuid_required = true;
2250 xcc->ordering = 8;
2252 if (kvm_enabled()) {
2253 xcc->model_description =
2254 "KVM processor with all supported host features ";
2255 } else if (hvf_enabled()) {
2256 xcc->model_description =
2257 "HVF processor with all supported host features ";
2261 static const TypeInfo host_x86_cpu_type_info = {
2262 .name = X86_CPU_TYPE_NAME("host"),
2263 .parent = X86_CPU_TYPE_NAME("max"),
2264 .class_init = host_x86_cpu_class_init,
2267 #endif
2269 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2271 FeatureWordInfo *f = &feature_word_info[w];
2272 int i;
2274 for (i = 0; i < 32; ++i) {
2275 if ((1UL << i) & mask) {
2276 const char *reg = get_register_name_32(f->cpuid_reg);
2277 assert(reg);
2278 warn_report("%s doesn't support requested feature: "
2279 "CPUID.%02XH:%s%s%s [bit %d]",
2280 accel_uses_host_cpuid() ? "host" : "TCG",
2281 f->cpuid_eax, reg,
2282 f->feat_names[i] ? "." : "",
2283 f->feat_names[i] ? f->feat_names[i] : "", i);
2288 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2289 const char *name, void *opaque,
2290 Error **errp)
2292 X86CPU *cpu = X86_CPU(obj);
2293 CPUX86State *env = &cpu->env;
2294 int64_t value;
2296 value = (env->cpuid_version >> 8) & 0xf;
2297 if (value == 0xf) {
2298 value += (env->cpuid_version >> 20) & 0xff;
2300 visit_type_int(v, name, &value, errp);
2303 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2304 const char *name, void *opaque,
2305 Error **errp)
2307 X86CPU *cpu = X86_CPU(obj);
2308 CPUX86State *env = &cpu->env;
2309 const int64_t min = 0;
2310 const int64_t max = 0xff + 0xf;
2311 Error *local_err = NULL;
2312 int64_t value;
2314 visit_type_int(v, name, &value, &local_err);
2315 if (local_err) {
2316 error_propagate(errp, local_err);
2317 return;
2319 if (value < min || value > max) {
2320 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2321 name ? name : "null", value, min, max);
2322 return;
2325 env->cpuid_version &= ~0xff00f00;
2326 if (value > 0x0f) {
2327 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2328 } else {
2329 env->cpuid_version |= value << 8;
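/* Worked example: "family" 6 fits the base field, so bits 11:8 of
 * cpuid_version become 6.  "family" 21 (0x15) takes the extended path:
 * the base field is set to 0xF and the extended-family field
 * (bits 27:20) to 21 - 15 = 6, which CPUID.1:EAX decodes back to
 * family 0xF + 0x6 = 0x15.
 */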
2333 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2334 const char *name, void *opaque,
2335 Error **errp)
2337 X86CPU *cpu = X86_CPU(obj);
2338 CPUX86State *env = &cpu->env;
2339 int64_t value;
2341 value = (env->cpuid_version >> 4) & 0xf;
2342 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2343 visit_type_int(v, name, &value, errp);
2346 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2347 const char *name, void *opaque,
2348 Error **errp)
2350 X86CPU *cpu = X86_CPU(obj);
2351 CPUX86State *env = &cpu->env;
2352 const int64_t min = 0;
2353 const int64_t max = 0xff;
2354 Error *local_err = NULL;
2355 int64_t value;
2357 visit_type_int(v, name, &value, &local_err);
2358 if (local_err) {
2359 error_propagate(errp, local_err);
2360 return;
2362 if (value < min || value > max) {
2363 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2364 name ? name : "null", value, min, max);
2365 return;
2368 env->cpuid_version &= ~0xf00f0;
2369 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
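/* Worked example: "model" 94 (0x5E, used by Skylake-Client above)
 * stores the low nibble 0xE in bits 7:4 and the high nibble 0x5 in the
 * extended-model field (bits 19:16), so CPUID.1:EAX decodes back to
 * model (0x5 << 4) + 0xE = 94.
 */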
2372 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2373 const char *name, void *opaque,
2374 Error **errp)
2376 X86CPU *cpu = X86_CPU(obj);
2377 CPUX86State *env = &cpu->env;
2378 int64_t value;
2380 value = env->cpuid_version & 0xf;
2381 visit_type_int(v, name, &value, errp);
2384 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2385 const char *name, void *opaque,
2386 Error **errp)
2388 X86CPU *cpu = X86_CPU(obj);
2389 CPUX86State *env = &cpu->env;
2390 const int64_t min = 0;
2391 const int64_t max = 0xf;
2392 Error *local_err = NULL;
2393 int64_t value;
2395 visit_type_int(v, name, &value, &local_err);
2396 if (local_err) {
2397 error_propagate(errp, local_err);
2398 return;
2400 if (value < min || value > max) {
2401 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2402 name ? name : "null", value, min, max);
2403 return;
2406 env->cpuid_version &= ~0xf;
2407 env->cpuid_version |= value & 0xf;
2410 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2412 X86CPU *cpu = X86_CPU(obj);
2413 CPUX86State *env = &cpu->env;
2414 char *value;
2416 value = g_malloc(CPUID_VENDOR_SZ + 1);
2417 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2418 env->cpuid_vendor3);
2419 return value;
2422 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2423 Error **errp)
2425 X86CPU *cpu = X86_CPU(obj);
2426 CPUX86State *env = &cpu->env;
2427 int i;
2429 if (strlen(value) != CPUID_VENDOR_SZ) {
2430 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2431 return;
2434 env->cpuid_vendor1 = 0;
2435 env->cpuid_vendor2 = 0;
2436 env->cpuid_vendor3 = 0;
2437 for (i = 0; i < 4; i++) {
2438 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
2439 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
2440 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
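/* Worked example: "GenuineIntel" packs little-endian into
 * cpuid_vendor1 = 0x756e6547 ("Genu", returned in EBX),
 * cpuid_vendor2 = 0x49656e69 ("ineI", EDX) and
 * cpuid_vendor3 = 0x6c65746e ("ntel", ECX).
 */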
2444 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
2446 X86CPU *cpu = X86_CPU(obj);
2447 CPUX86State *env = &cpu->env;
2448 char *value;
2449 int i;
2451 value = g_malloc(48 + 1);
2452 for (i = 0; i < 48; i++) {
2453 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
2455 value[48] = '\0';
2456 return value;
2459 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
2460 Error **errp)
2462 X86CPU *cpu = X86_CPU(obj);
2463 CPUX86State *env = &cpu->env;
2464 int c, len, i;
2466 if (model_id == NULL) {
2467 model_id = "";
2469 len = strlen(model_id);
2470 memset(env->cpuid_model, 0, 48);
2471 for (i = 0; i < 48; i++) {
2472 if (i >= len) {
2473 c = '\0';
2474 } else {
2475 c = (uint8_t)model_id[i];
2477 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
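/* Worked example: model_id = "QEMU" (hypothetical) leaves
 * cpuid_model[0] = 'Q' | 'E' << 8 | 'M' << 16 | 'U' << 24 = 0x554d4551
 * and zero-pads the remaining bytes up to index 47, which is exactly
 * what leaves 0x80000002..0x80000004 return below.
 */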
2481 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
2482 void *opaque, Error **errp)
2484 X86CPU *cpu = X86_CPU(obj);
2485 int64_t value;
2487 value = cpu->env.tsc_khz * 1000;
2488 visit_type_int(v, name, &value, errp);
2491 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
2492 void *opaque, Error **errp)
2494 X86CPU *cpu = X86_CPU(obj);
2495 const int64_t min = 0;
2496 const int64_t max = INT64_MAX;
2497 Error *local_err = NULL;
2498 int64_t value;
2500 visit_type_int(v, name, &value, &local_err);
2501 if (local_err) {
2502 error_propagate(errp, local_err);
2503 return;
2505 if (value < min || value > max) {
2506 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2507 name ? name : "null", value, min, max);
2508 return;
2511 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
2514 /* Generic getter for "feature-words" and "filtered-features" properties */
2515 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2516 const char *name, void *opaque,
2517 Error **errp)
2519 uint32_t *array = (uint32_t *)opaque;
2520 FeatureWord w;
2521 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
2522 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
2523 X86CPUFeatureWordInfoList *list = NULL;
2525 for (w = 0; w < FEATURE_WORDS; w++) {
2526 FeatureWordInfo *wi = &feature_word_info[w];
2527 X86CPUFeatureWordInfo *qwi = &word_infos[w];
2528 qwi->cpuid_input_eax = wi->cpuid_eax;
2529 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
2530 qwi->cpuid_input_ecx = wi->cpuid_ecx;
2531 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
2532 qwi->features = array[w];
2534 /* List will be in reverse order, but order shouldn't matter */
2535 list_entries[w].next = list;
2536 list_entries[w].value = &word_infos[w];
2537 list = &list_entries[w];
2540 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
2543 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2544 void *opaque, Error **errp)
2546 X86CPU *cpu = X86_CPU(obj);
2547 int64_t value = cpu->hyperv_spinlock_attempts;
2549 visit_type_int(v, name, &value, errp);
2552 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2553 void *opaque, Error **errp)
2555 const int64_t min = 0xFFF;
2556 const int64_t max = UINT_MAX;
2557 X86CPU *cpu = X86_CPU(obj);
2558 Error *err = NULL;
2559 int64_t value;
2561 visit_type_int(v, name, &value, &err);
2562 if (err) {
2563 error_propagate(errp, err);
2564 return;
2567 if (value < min || value > max) {
2568 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2569 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2570 object_get_typename(obj), name ? name : "null",
2571 value, min, max);
2572 return;
2574 cpu->hyperv_spinlock_attempts = value;
2577 static const PropertyInfo qdev_prop_spinlocks = {
2578 .name = "int",
2579 .get = x86_get_hv_spinlocks,
2580 .set = x86_set_hv_spinlocks,
2583 /* Convert all '_' in a feature string option name to '-', to make feature
2584 * name conform to QOM property naming rule, which uses '-' instead of '_'.
2586 static inline void feat2prop(char *s)
2588 while ((s = strchr(s, '_'))) {
2589 *s = '-';
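/* e.g. feat2prop() turns "kvm_pv_eoi" into "kvm-pv-eoi"; names without
 * underscores, such as "x2apic", pass through unchanged.
 */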
2593 /* Return the feature property name for a feature flag bit */
2594 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2596 /* XSAVE components are automatically enabled by other features,
2597 * so return the original feature name instead
2599 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2600 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2602 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2603 x86_ext_save_areas[comp].bits) {
2604 w = x86_ext_save_areas[comp].feature;
2605 bitnr = ctz32(x86_ext_save_areas[comp].bits);
2609 assert(bitnr < 32);
2610 assert(w < FEATURE_WORDS);
2611 return feature_word_info[w].feat_names[bitnr];
2614 /* Compatibility hack to maintain the legacy +-feat semantics,
2615 * where +-feat overwrites any feature set by
2616 * feat=on|feat, even if the latter is parsed after +-feat
2617 * (i.e. "-x2apic,x2apic=on" will result in x2apic being disabled)
2619 static GList *plus_features, *minus_features;
2621 static gint compare_string(gconstpointer a, gconstpointer b)
2623 return g_strcmp0(a, b);
2626 /* Parse "+feature,-feature,feature=foo" CPU feature string
2628 static void x86_cpu_parse_featurestr(const char *typename, char *features,
2629 Error **errp)
2631 char *featurestr; /* Single "key=value" string being parsed */
2632 static bool cpu_globals_initialized;
2633 bool ambiguous = false;
2635 if (cpu_globals_initialized) {
2636 return;
2638 cpu_globals_initialized = true;
2640 if (!features) {
2641 return;
2644 for (featurestr = strtok(features, ",");
2645 featurestr;
2646 featurestr = strtok(NULL, ",")) {
2647 const char *name;
2648 const char *val = NULL;
2649 char *eq = NULL;
2650 char num[32];
2651 GlobalProperty *prop;
2653 /* Compatibility syntax: */
2654 if (featurestr[0] == '+') {
2655 plus_features = g_list_append(plus_features,
2656 g_strdup(featurestr + 1));
2657 continue;
2658 } else if (featurestr[0] == '-') {
2659 minus_features = g_list_append(minus_features,
2660 g_strdup(featurestr + 1));
2661 continue;
2664 eq = strchr(featurestr, '=');
2665 if (eq) {
2666 *eq++ = 0;
2667 val = eq;
2668 } else {
2669 val = "on";
2672 feat2prop(featurestr);
2673 name = featurestr;
2675 if (g_list_find_custom(plus_features, name, compare_string)) {
2676 warn_report("Ambiguous CPU model string. "
2677 "Don't mix both \"+%s\" and \"%s=%s\"",
2678 name, name, val);
2679 ambiguous = true;
2681 if (g_list_find_custom(minus_features, name, compare_string)) {
2682 warn_report("Ambiguous CPU model string. "
2683 "Don't mix both \"-%s\" and \"%s=%s\"",
2684 name, name, val);
2685 ambiguous = true;
2688 /* Special case: */
2689 if (!strcmp(name, "tsc-freq")) {
2690 int ret;
2691 uint64_t tsc_freq;
2693 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
2694 if (ret < 0 || tsc_freq > INT64_MAX) {
2695 error_setg(errp, "bad numerical value %s", val);
2696 return;
2698 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2699 val = num;
2700 name = "tsc-frequency";
2703 prop = g_new0(typeof(*prop), 1);
2704 prop->driver = typename;
2705 prop->property = g_strdup(name);
2706 prop->value = g_strdup(val);
2707 prop->errp = &error_fatal;
2708 qdev_prop_register_global(prop);
2711 if (ambiguous) {
2712 warn_report("Compatibility of ambiguous CPU model "
2713 "strings won't be kept on future QEMU versions");
2717 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2718 static int x86_cpu_filter_features(X86CPU *cpu);
2720 /* Check for missing features that may prevent the CPU class from
2721 * running with the current machine and accelerator.
2723 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2724 strList **missing_feats)
2726 X86CPU *xc;
2727 FeatureWord w;
2728 Error *err = NULL;
2729 strList **next = missing_feats;
2731 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
2732 strList *new = g_new0(strList, 1);
2733 new->value = g_strdup("kvm");
2734 *missing_feats = new;
2735 return;
2738 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2740 x86_cpu_expand_features(xc, &err);
2741 if (err) {
2742 /* Errors at x86_cpu_expand_features should never happen,
2743 * but in case they do, just report the model as not
2744 * runnable at all using the "type" property.
2746 strList *new = g_new0(strList, 1);
2747 new->value = g_strdup("type");
2748 *next = new;
2749 next = &new->next;
2752 x86_cpu_filter_features(xc);
2754 for (w = 0; w < FEATURE_WORDS; w++) {
2755 uint32_t filtered = xc->filtered_features[w];
2756 int i;
2757 for (i = 0; i < 32; i++) {
2758 if (filtered & (1UL << i)) {
2759 strList *new = g_new0(strList, 1);
2760 new->value = g_strdup(x86_cpu_feature_name(w, i));
2761 *next = new;
2762 next = &new->next;
2767 object_unref(OBJECT(xc));
2770 /* Print all cpuid feature names in featureset
2772 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2774 int bit;
2775 bool first = true;
2777 for (bit = 0; bit < 32; bit++) {
2778 if (featureset[bit]) {
2779 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2780 first = false;
2785 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2786 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2788 ObjectClass *class_a = (ObjectClass *)a;
2789 ObjectClass *class_b = (ObjectClass *)b;
2790 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2791 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2792 const char *name_a, *name_b;
2794 if (cc_a->ordering != cc_b->ordering) {
2795 return cc_a->ordering - cc_b->ordering;
2796 } else {
2797 name_a = object_class_get_name(class_a);
2798 name_b = object_class_get_name(class_b);
2799 return strcmp(name_a, name_b);
2803 static GSList *get_sorted_cpu_model_list(void)
2805 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2806 list = g_slist_sort(list, x86_cpu_list_compare);
2807 return list;
2810 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2812 ObjectClass *oc = data;
2813 X86CPUClass *cc = X86_CPU_CLASS(oc);
2814 CPUListState *s = user_data;
2815 char *name = x86_cpu_class_get_model_name(cc);
2816 const char *desc = cc->model_description;
2817 if (!desc && cc->cpu_def) {
2818 desc = cc->cpu_def->model_id;
2821 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2822 name, desc);
2823 g_free(name);
2826 /* list available CPU models and flags */
2827 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2829 int i;
2830 CPUListState s = {
2831 .file = f,
2832 .cpu_fprintf = cpu_fprintf,
2834 GSList *list;
2836 (*cpu_fprintf)(f, "Available CPUs:\n");
2837 list = get_sorted_cpu_model_list();
2838 g_slist_foreach(list, x86_cpu_list_entry, &s);
2839 g_slist_free(list);
2841 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2842 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2843 FeatureWordInfo *fw = &feature_word_info[i];
2845 (*cpu_fprintf)(f, " ");
2846 listflags(f, cpu_fprintf, fw->feat_names);
2847 (*cpu_fprintf)(f, "\n");
2851 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2853 ObjectClass *oc = data;
2854 X86CPUClass *cc = X86_CPU_CLASS(oc);
2855 CpuDefinitionInfoList **cpu_list = user_data;
2856 CpuDefinitionInfoList *entry;
2857 CpuDefinitionInfo *info;
2859 info = g_malloc0(sizeof(*info));
2860 info->name = x86_cpu_class_get_model_name(cc);
2861 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2862 info->has_unavailable_features = true;
2863 info->q_typename = g_strdup(object_class_get_name(oc));
2864 info->migration_safe = cc->migration_safe;
2865 info->has_migration_safe = true;
2866 info->q_static = cc->static_model;
2868 entry = g_malloc0(sizeof(*entry));
2869 entry->value = info;
2870 entry->next = *cpu_list;
2871 *cpu_list = entry;
2874 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2876 CpuDefinitionInfoList *cpu_list = NULL;
2877 GSList *list = get_sorted_cpu_model_list();
2878 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2879 g_slist_free(list);
2880 return cpu_list;
2883 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2884 bool migratable_only)
2886 FeatureWordInfo *wi = &feature_word_info[w];
2887 uint32_t r;
2889 if (kvm_enabled()) {
2890 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2891 wi->cpuid_ecx,
2892 wi->cpuid_reg);
2893 } else if (hvf_enabled()) {
2894 r = hvf_get_supported_cpuid(wi->cpuid_eax,
2895 wi->cpuid_ecx,
2896 wi->cpuid_reg);
2897 } else if (tcg_enabled()) {
2898 r = wi->tcg_features;
2899 } else {
2900 return ~0;
2902 if (migratable_only) {
2903 r &= x86_cpu_get_migratable_flags(w);
2905 return r;
2908 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2910 FeatureWord w;
2912 for (w = 0; w < FEATURE_WORDS; w++) {
2913 report_unavailable_features(w, cpu->filtered_features[w]);
2917 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2919 PropValue *pv;
2920 for (pv = props; pv->prop; pv++) {
2921 if (!pv->value) {
2922 continue;
2924 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2925 &error_abort);
2929 /* Load data from X86CPUDefinition into a X86CPU object
2931 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2933 CPUX86State *env = &cpu->env;
2934 const char *vendor;
2935 char host_vendor[CPUID_VENDOR_SZ + 1];
2936 FeatureWord w;
2938 /* NOTE: any property set by this function should be returned by
2939 * x86_cpu_static_props(), so static expansion of
2940 * query-cpu-model-expansion is always complete.
2943 /* CPU models only set _minimum_ values for level/xlevel: */
2944 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
2945 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2947 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2948 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2949 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2950 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2951 for (w = 0; w < FEATURE_WORDS; w++) {
2952 env->features[w] = def->features[w];
2955 /* Special cases not set in the X86CPUDefinition structs: */
2956 /* TODO: in-kernel irqchip for hvf */
2957 if (kvm_enabled()) {
2958 if (!kvm_irqchip_in_kernel()) {
2959 x86_cpu_change_kvm_default("x2apic", "off");
2962 x86_cpu_apply_props(cpu, kvm_default_props);
2963 } else if (tcg_enabled()) {
2964 x86_cpu_apply_props(cpu, tcg_default_props);
2967 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2969 /* sysenter isn't supported in compatibility mode on AMD,
2970 * syscall isn't supported in compatibility mode on Intel.
2971 * Normally we advertise the actual CPU vendor, but you can
2972 * override this using the 'vendor' property if you want to use
2973 * KVM's sysenter/syscall emulation in compatibility mode and
2974 * when doing cross-vendor migration.
2976 vendor = def->vendor;
2977 if (accel_uses_host_cpuid()) {
2978 uint32_t ebx = 0, ecx = 0, edx = 0;
2979 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2980 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2981 vendor = host_vendor;
2984 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2988 /* Return a QDict containing keys for all properties that can be included
2989 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
2990 * must be included in the dictionary.
2992 static QDict *x86_cpu_static_props(void)
2994 FeatureWord w;
2995 int i;
2996 static const char *props[] = {
2997 "min-level",
2998 "min-xlevel",
2999 "family",
3000 "model",
3001 "stepping",
3002 "model-id",
3003 "vendor",
3004 "lmce",
3005 NULL,
3007 static QDict *d;
3009 if (d) {
3010 return d;
3013 d = qdict_new();
3014 for (i = 0; props[i]; i++) {
3015 qdict_put_null(d, props[i]);
3018 for (w = 0; w < FEATURE_WORDS; w++) {
3019 FeatureWordInfo *fi = &feature_word_info[w];
3020 int bit;
3021 for (bit = 0; bit < 32; bit++) {
3022 if (!fi->feat_names[bit]) {
3023 continue;
3025 qdict_put_null(d, fi->feat_names[bit]);
3029 return d;
3032 /* Add an entry to @props dict, with the value for property. */
3033 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3035 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3036 &error_abort);
3038 qdict_put_obj(props, prop, value);
3041 /* Convert CPU model data from X86CPU object to a property dictionary
3042 * that can recreate exactly the same CPU model.
3044 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3046 QDict *sprops = x86_cpu_static_props();
3047 const QDictEntry *e;
3049 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3050 const char *prop = qdict_entry_key(e);
3051 x86_cpu_expand_prop(cpu, props, prop);
3055 /* Convert CPU model data from X86CPU object to a property dictionary
3056 * that can recreate exactly the same CPU model, including every
3057 * writeable QOM property.
3059 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3061 ObjectPropertyIterator iter;
3062 ObjectProperty *prop;
3064 object_property_iter_init(&iter, OBJECT(cpu));
3065 while ((prop = object_property_iter_next(&iter))) {
3066 /* skip read-only or write-only properties */
3067 if (!prop->get || !prop->set) {
3068 continue;
3071 /* "hotplugged" is the only property that is configurable
3072 * on the command-line but will be set differently on CPUs
3073 * created using "-cpu ... -smp ..." and by CPUs created
3074 * on the fly by x86_cpu_from_model() for querying. Skip it.
3076 if (!strcmp(prop->name, "hotplugged")) {
3077 continue;
3079 x86_cpu_expand_prop(cpu, props, prop->name);
3083 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3085 const QDictEntry *prop;
3086 Error *err = NULL;
3088 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3089 object_property_set_qobject(obj, qdict_entry_value(prop),
3090 qdict_entry_key(prop), &err);
3091 if (err) {
3092 break;
3096 error_propagate(errp, err);
3099 /* Create X86CPU object according to model+props specification */
3100 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3102 X86CPU *xc = NULL;
3103 X86CPUClass *xcc;
3104 Error *err = NULL;
3106 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3107 if (xcc == NULL) {
3108 error_setg(&err, "CPU model '%s' not found", model);
3109 goto out;
3112 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3113 if (props) {
3114 object_apply_props(OBJECT(xc), props, &err);
3115 if (err) {
3116 goto out;
3120 x86_cpu_expand_features(xc, &err);
3121 if (err) {
3122 goto out;
3125 out:
3126 if (err) {
3127 error_propagate(errp, err);
3128 object_unref(OBJECT(xc));
3129 xc = NULL;
3131 return xc;
3134 CpuModelExpansionInfo *
3135 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3136 CpuModelInfo *model,
3137 Error **errp)
3139 X86CPU *xc = NULL;
3140 Error *err = NULL;
3141 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3142 QDict *props = NULL;
3143 const char *base_name;
3145 xc = x86_cpu_from_model(model->name,
3146 model->has_props ?
3147 qobject_to(QDict, model->props) :
3148 NULL, &err);
3149 if (err) {
3150 goto out;
3153 props = qdict_new();
3155 switch (type) {
3156 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3157 /* Static expansion will be based on "base" only */
3158 base_name = "base";
3159 x86_cpu_to_dict(xc, props);
3160 break;
3161 case CPU_MODEL_EXPANSION_TYPE_FULL:
3162 /* As we don't return every single property, full expansion needs
3163 * to keep the original model name+props, and add extra
3164 * properties on top of that.
3166 base_name = model->name;
3167 x86_cpu_to_dict_full(xc, props);
3168 break;
3169 default:
3170 error_setg(&err, "Unsupported expansion type");
3171 goto out;
3174 if (!props) {
3175 props = qdict_new();
3177 x86_cpu_to_dict(xc, props);
3179 ret->model = g_new0(CpuModelInfo, 1);
3180 ret->model->name = g_strdup(base_name);
3181 ret->model->props = QOBJECT(props);
3182 ret->model->has_props = true;
3184 out:
3185 object_unref(OBJECT(xc));
3186 if (err) {
3187 error_propagate(errp, err);
3188 qapi_free_CpuModelExpansionInfo(ret);
3189 ret = NULL;
3191 return ret;
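/* Example QMP usage (illustrative):
 *   { "execute": "query-cpu-model-expansion",
 *     "arguments": { "type": "static", "model": { "name": "EPYC" } } }
 * returns a model named "base" plus the property set needed to recreate
 * EPYC, while type "full" keeps the original model name and also reports
 * every writable QOM property.
 */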
3194 static gchar *x86_gdb_arch_name(CPUState *cs)
3196 #ifdef TARGET_X86_64
3197 return g_strdup("i386:x86-64");
3198 #else
3199 return g_strdup("i386");
3200 #endif
3203 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3205 X86CPUDefinition *cpudef = data;
3206 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3208 xcc->cpu_def = cpudef;
3209 xcc->migration_safe = true;
3212 static void x86_register_cpudef_type(X86CPUDefinition *def)
3214 char *typename = x86_cpu_type_name(def->name);
3215 TypeInfo ti = {
3216 .name = typename,
3217 .parent = TYPE_X86_CPU,
3218 .class_init = x86_cpu_cpudef_class_init,
3219 .class_data = def,
3222 /* AMD aliases are handled at runtime based on CPUID vendor, so
3223 * they shouldn't be set in the CPU model table.
3225 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3226 /* catch mistakes instead of silently truncating model_id when too long */
3227 assert(def->model_id && strlen(def->model_id) <= 48);
3230 type_register(&ti);
3231 g_free(typename);
3234 #if !defined(CONFIG_USER_ONLY)
3236 void cpu_clear_apic_feature(CPUX86State *env)
3238 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3241 #endif /* !CONFIG_USER_ONLY */
3243 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3244 uint32_t *eax, uint32_t *ebx,
3245 uint32_t *ecx, uint32_t *edx)
3247 X86CPU *cpu = x86_env_get_cpu(env);
3248 CPUState *cs = CPU(cpu);
3249 uint32_t pkg_offset;
3250 uint32_t limit;
3251 uint32_t signature[3];
3253 /* Calculate & apply limits for different index ranges */
3254 if (index >= 0xC0000000) {
3255 limit = env->cpuid_xlevel2;
3256 } else if (index >= 0x80000000) {
3257 limit = env->cpuid_xlevel;
3258 } else if (index >= 0x40000000) {
3259 limit = 0x40000001;
3260 } else {
3261 limit = env->cpuid_level;
3264 if (index > limit) {
3265 /* Intel documentation states that invalid EAX input will
3266 * return the same information as EAX=cpuid_level
3267 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3269 index = env->cpuid_level;
3272 switch(index) {
3273 case 0:
3274 *eax = env->cpuid_level;
3275 *ebx = env->cpuid_vendor1;
3276 *edx = env->cpuid_vendor2;
3277 *ecx = env->cpuid_vendor3;
3278 break;
3279 case 1:
3280 *eax = env->cpuid_version;
3281 *ebx = (cpu->apic_id << 24) |
3282 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3283 *ecx = env->features[FEAT_1_ECX];
3284 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3285 *ecx |= CPUID_EXT_OSXSAVE;
3287 *edx = env->features[FEAT_1_EDX];
3288 if (cs->nr_cores * cs->nr_threads > 1) {
3289 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3290 *edx |= CPUID_HT;
3292 break;
3293 case 2:
3294 /* cache info: needed for Pentium Pro compatibility */
3295 if (cpu->cache_info_passthrough) {
3296 host_cpuid(index, 0, eax, ebx, ecx, edx);
3297 break;
3299 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3300 *ebx = 0;
3301 if (!cpu->enable_l3_cache) {
3302 *ecx = 0;
3303 } else {
3304 *ecx = L3_N_DESCRIPTOR;
3306 *edx = (L1D_DESCRIPTOR << 16) | \
3307 (L1I_DESCRIPTOR << 8) | \
3308 (L2_DESCRIPTOR);
3309 break;
3310 case 4:
3311 /* cache info: needed for Core compatibility */
3312 if (cpu->cache_info_passthrough) {
3313 host_cpuid(index, count, eax, ebx, ecx, edx);
3314 *eax &= ~0xFC000000;
3315 } else {
3316 *eax = 0;
3317 switch (count) {
3318 case 0: /* L1 dcache info */
3319 *eax |= CPUID_4_TYPE_DCACHE | \
3320 CPUID_4_LEVEL(1) | \
3321 CPUID_4_SELF_INIT_LEVEL;
3322 *ebx = (L1D_LINE_SIZE - 1) | \
3323 ((L1D_PARTITIONS - 1) << 12) | \
3324 ((L1D_ASSOCIATIVITY - 1) << 22);
3325 *ecx = L1D_SETS - 1;
3326 *edx = CPUID_4_NO_INVD_SHARING;
3327 break;
3328 case 1: /* L1 icache info */
3329 *eax |= CPUID_4_TYPE_ICACHE | \
3330 CPUID_4_LEVEL(1) | \
3331 CPUID_4_SELF_INIT_LEVEL;
3332 *ebx = (L1I_LINE_SIZE - 1) | \
3333 ((L1I_PARTITIONS - 1) << 12) | \
3334 ((L1I_ASSOCIATIVITY - 1) << 22);
3335 *ecx = L1I_SETS - 1;
3336 *edx = CPUID_4_NO_INVD_SHARING;
3337 break;
3338 case 2: /* L2 cache info */
3339 *eax |= CPUID_4_TYPE_UNIFIED | \
3340 CPUID_4_LEVEL(2) | \
3341 CPUID_4_SELF_INIT_LEVEL;
3342 if (cs->nr_threads > 1) {
3343 *eax |= (cs->nr_threads - 1) << 14;
3345 *ebx = (L2_LINE_SIZE - 1) | \
3346 ((L2_PARTITIONS - 1) << 12) | \
3347 ((L2_ASSOCIATIVITY - 1) << 22);
3348 *ecx = L2_SETS - 1;
3349 *edx = CPUID_4_NO_INVD_SHARING;
3350 break;
3351 case 3: /* L3 cache info */
3352 if (!cpu->enable_l3_cache) {
3353 *eax = 0;
3354 *ebx = 0;
3355 *ecx = 0;
3356 *edx = 0;
3357 break;
3359 *eax |= CPUID_4_TYPE_UNIFIED | \
3360 CPUID_4_LEVEL(3) | \
3361 CPUID_4_SELF_INIT_LEVEL;
3362 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3363 *eax |= ((1 << pkg_offset) - 1) << 14;
3364 *ebx = (L3_N_LINE_SIZE - 1) | \
3365 ((L3_N_PARTITIONS - 1) << 12) | \
3366 ((L3_N_ASSOCIATIVITY - 1) << 22);
3367 *ecx = L3_N_SETS - 1;
3368 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
3369 break;
3370 default: /* end of info */
3371 *eax = 0;
3372 *ebx = 0;
3373 *ecx = 0;
3374 *edx = 0;
3375 break;
3379 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3380 if ((*eax & 31) && cs->nr_cores > 1) {
3381 *eax |= (cs->nr_cores - 1) << 26;
3383 break;
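/* Example: with 4 cores and 2 threads per core (hypothetical -smp
 * values), bits 31:26 of EAX report nr_cores - 1 = 3 for every valid
 * cache level, and the L2 entry above additionally encodes
 * nr_threads - 1 = 1 in bits 25:14.
 */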
3384 case 5:
3385 /* mwait info: needed for Core compatibility */
3386 *eax = 0; /* Smallest monitor-line size in bytes */
3387 *ebx = 0; /* Largest monitor-line size in bytes */
3388 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3389 *edx = 0;
3390 break;
3391 case 6:
3392 /* Thermal and Power Leaf */
3393 *eax = env->features[FEAT_6_EAX];
3394 *ebx = 0;
3395 *ecx = 0;
3396 *edx = 0;
3397 break;
3398 case 7:
3399 /* Structured Extended Feature Flags Enumeration Leaf */
3400 if (count == 0) {
3401 *eax = 0; /* Maximum ECX value for sub-leaves */
3402 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3403 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
3404 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3405 *ecx |= CPUID_7_0_ECX_OSPKE;
3407 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3408 } else {
3409 *eax = 0;
3410 *ebx = 0;
3411 *ecx = 0;
3412 *edx = 0;
3414 break;
3415 case 9:
3416 /* Direct Cache Access Information Leaf */
3417 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3418 *ebx = 0;
3419 *ecx = 0;
3420 *edx = 0;
3421 break;
3422 case 0xA:
3423 /* Architectural Performance Monitoring Leaf */
3424 if (kvm_enabled() && cpu->enable_pmu) {
3425 KVMState *s = cs->kvm_state;
3427 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
3428 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
3429 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
3430 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
3431 } else if (hvf_enabled() && cpu->enable_pmu) {
3432 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
3433 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
3434 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
3435 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
3436 } else {
3437 *eax = 0;
3438 *ebx = 0;
3439 *ecx = 0;
3440 *edx = 0;
3442 break;
3443 case 0xB:
3444 /* Extended Topology Enumeration Leaf */
3445 if (!cpu->enable_cpuid_0xb) {
3446 *eax = *ebx = *ecx = *edx = 0;
3447 break;
3450 *ecx = count & 0xff;
3451 *edx = cpu->apic_id;
3453 switch (count) {
3454 case 0:
3455 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
3456 *ebx = cs->nr_threads;
3457 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
3458 break;
3459 case 1:
3460 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3461 *ebx = cs->nr_cores * cs->nr_threads;
3462 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
3463 break;
3464 default:
3465 *eax = 0;
3466 *ebx = 0;
3467 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
3470 assert(!(*eax & ~0x1f));
3471 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
3472 break;
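/* Worked example: with 4 cores and 2 threads per core (hypothetical),
 * apicid_core_offset() is 1 and apicid_pkg_offset() is 3, so sub-leaf 0
 * returns EAX=1, EBX=2 (SMT level) and sub-leaf 1 returns EAX=3, EBX=8
 * (core level); EDX always carries the vCPU's APIC ID.
 */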
3473 case 0xD: {
3474 /* Processor Extended State */
3475 *eax = 0;
3476 *ebx = 0;
3477 *ecx = 0;
3478 *edx = 0;
3479 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3480 break;
3483 if (count == 0) {
3484 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
3485 *eax = env->features[FEAT_XSAVE_COMP_LO];
3486 *edx = env->features[FEAT_XSAVE_COMP_HI];
3487 *ebx = *ecx;
3488 } else if (count == 1) {
3489 *eax = env->features[FEAT_XSAVE];
3490 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
3491 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
3492 const ExtSaveArea *esa = &x86_ext_save_areas[count];
3493 *eax = esa->size;
3494 *ebx = esa->offset;
3497 break;
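/* Example: with xsave and avx enabled and no wider components
 * (hypothetical), sub-leaf 0 reports the component bitmap in EAX/EDX and
 * an area size of 576 + 256 = 832 bytes in EBX/ECX, sub-leaf 1 mirrors
 * env->features[FEAT_XSAVE], and sub-leaf 2 returns the AVX state size
 * (256) and standard-format offset (576).
 */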
3499 case 0x14: {
3500 /* Intel Processor Trace Enumeration */
3501 *eax = 0;
3502 *ebx = 0;
3503 *ecx = 0;
3504 *edx = 0;
3505 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
3506 !kvm_enabled()) {
3507 break;
3510 if (count == 0) {
3511 *eax = INTEL_PT_MAX_SUBLEAF;
3512 *ebx = INTEL_PT_MINIMAL_EBX;
3513 *ecx = INTEL_PT_MINIMAL_ECX;
3514 } else if (count == 1) {
3515 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
3516 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
3518 break;
3520 case 0x40000000:
3522 * CPUID code in kvm_arch_init_vcpu() ignores the values
3523 * set here, but we restrict this leaf to TCG nonetheless.
3525 if (tcg_enabled() && cpu->expose_tcg) {
3526 memcpy(signature, "TCGTCGTCGTCG", 12);
3527 *eax = 0x40000001;
3528 *ebx = signature[0];
3529 *ecx = signature[1];
3530 *edx = signature[2];
3531 } else {
3532 *eax = 0;
3533 *ebx = 0;
3534 *ecx = 0;
3535 *edx = 0;
3537 break;
3538 case 0x40000001:
3539 *eax = 0;
3540 *ebx = 0;
3541 *ecx = 0;
3542 *edx = 0;
3543 break;
3544 case 0x80000000:
3545 *eax = env->cpuid_xlevel;
3546 *ebx = env->cpuid_vendor1;
3547 *edx = env->cpuid_vendor2;
3548 *ecx = env->cpuid_vendor3;
3549 break;
3550 case 0x80000001:
3551 *eax = env->cpuid_version;
3552 *ebx = 0;
3553 *ecx = env->features[FEAT_8000_0001_ECX];
3554 *edx = env->features[FEAT_8000_0001_EDX];
3556 /* The Linux kernel checks for the CMPLegacy bit and
3557 * discards multiple thread information if it is set.
3558 * So don't set it here for Intel to make Linux guests happy.
3560 if (cs->nr_cores * cs->nr_threads > 1) {
3561 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
3562 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
3563 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
3564 *ecx |= 1 << 1; /* CmpLegacy bit */
3567 break;
3568 case 0x80000002:
3569 case 0x80000003:
3570 case 0x80000004:
3571 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
3572 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
3573 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
3574 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
3575 break;
3576 case 0x80000005:
3577 /* cache info (L1 cache) */
3578 if (cpu->cache_info_passthrough) {
3579 host_cpuid(index, 0, eax, ebx, ecx, edx);
3580 break;
3582 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3583 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3584 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3585 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3586 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
3587 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
3588 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
3589 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
3590 break;
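/* Worked example (illustrative, using the constants defined earlier in
 * this file): ECX packs to (L1D_SIZE_KB_AMD << 24) |
 * (L1D_ASSOCIATIVITY_AMD << 16) | (L1D_LINES_PER_TAG << 8) | L1D_LINE_SIZE
 * = (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140.
 */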
3591 case 0x80000006:
3592 /* cache info (L2 cache) */
3593 if (cpu->cache_info_passthrough) {
3594 host_cpuid(index, 0, eax, ebx, ecx, edx);
3595 break;
3597 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3598 (L2_DTLB_2M_ENTRIES << 16) | \
3599 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3600 (L2_ITLB_2M_ENTRIES);
3601 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3602 (L2_DTLB_4K_ENTRIES << 16) | \
3603 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3604 (L2_ITLB_4K_ENTRIES);
3605 *ecx = (L2_SIZE_KB_AMD << 16) | \
3606 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
3607 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
3608 if (!cpu->enable_l3_cache) {
3609 *edx = ((L3_SIZE_KB / 512) << 18) | \
3610 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
3611 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
3612 } else {
3613 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
3614 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
3615 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
3617 break;
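/* Illustrative note: following AMD's layout for this leaf, ECX packs the
 * L2 size in KB into bits 31:16, the AMD_ENC_ASSOC() associativity code
 * into bits 15:12, lines per tag into bits 11:8 and the line size into
 * bits 7:0, which is what the shifts above implement; EDX does the same
 * for L3, with the size expressed in 512 KB units in bits 31:18.
 */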
3618 case 0x80000007:
3619 *eax = 0;
3620 *ebx = 0;
3621 *ecx = 0;
3622 *edx = env->features[FEAT_8000_0007_EDX];
3623 break;
3624 case 0x80000008:
3625 /* virtual & phys address size in low 2 bytes. */
3626 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3627 /* 64 bit processor */
3628 *eax = cpu->phys_bits; /* configurable physical bits */
3629 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
3630 *eax |= 0x00003900; /* 57 bits virtual */
3631 } else {
3632 *eax |= 0x00003000; /* 48 bits virtual */
3634 } else {
3635 *eax = cpu->phys_bits;
3637 *ebx = env->features[FEAT_8000_0008_EBX];
3638 *ecx = 0;
3639 *edx = 0;
3640 if (cs->nr_cores * cs->nr_threads > 1) {
3641 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
3643 break;
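/* Worked example (illustrative): a 64-bit guest configured with
 * phys-bits=40 and no LA57 sees EAX = 40 | 0x00003000 = 0x00003028,
 * i.e. 40 physical and 48 virtual address bits, and with 4 logical
 * processors ECX[7:0] = 3.
 */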
3644 case 0x8000000A:
3645 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3646 *eax = 0x00000001; /* SVM Revision */
3647 *ebx = 0x00000010; /* nr of ASIDs */
3648 *ecx = 0;
3649 *edx = env->features[FEAT_SVM]; /* optional features */
3650 } else {
3651 *eax = 0;
3652 *ebx = 0;
3653 *ecx = 0;
3654 *edx = 0;
3656 break;
3657 case 0xC0000000:
3658 *eax = env->cpuid_xlevel2;
3659 *ebx = 0;
3660 *ecx = 0;
3661 *edx = 0;
3662 break;
3663 case 0xC0000001:
3664 /* Support for VIA CPU's CPUID instruction */
3665 *eax = env->cpuid_version;
3666 *ebx = 0;
3667 *ecx = 0;
3668 *edx = env->features[FEAT_C000_0001_EDX];
3669 break;
3670 case 0xC0000002:
3671 case 0xC0000003:
3672 case 0xC0000004:
3673 /* Reserved for future use; filled with zero for now */
3674 *eax = 0;
3675 *ebx = 0;
3676 *ecx = 0;
3677 *edx = 0;
3678 break;
3679 case 0x8000001F:
3680 *eax = sev_enabled() ? 0x2 : 0;
3681 *ebx = sev_get_cbit_position();
3682 *ebx |= sev_get_reduced_phys_bits() << 6;
3683 *ecx = 0;
3684 *edx = 0;
3685 break;
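/* Illustrative example (hypothetical values): on a SEV-capable host with
 * the C-bit at position 47 and a 1-bit physical address reduction, EAX
 * would read 0x2 (SEV supported) and EBX would read 47 | (1 << 6) = 0x6F.
 */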
3686 default:
3687 /* reserved values: zero */
3688 *eax = 0;
3689 *ebx = 0;
3690 *ecx = 0;
3691 *edx = 0;
3692 break;
3696 /* CPUClass::reset() */
3697 static void x86_cpu_reset(CPUState *s)
3699 X86CPU *cpu = X86_CPU(s);
3700 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
3701 CPUX86State *env = &cpu->env;
3702 target_ulong cr4;
3703 uint64_t xcr0;
3704 int i;
3706 xcc->parent_reset(s);
3708 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
3710 env->old_exception = -1;
3712 /* init to reset state */
3714 env->hflags2 |= HF2_GIF_MASK;
3716 cpu_x86_update_cr0(env, 0x60000010);
3717 env->a20_mask = ~0x0;
3718 env->smbase = 0x30000;
3719 env->msr_smi_count = 0;
3721 env->idt.limit = 0xffff;
3722 env->gdt.limit = 0xffff;
3723 env->ldt.limit = 0xffff;
3724 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
3725 env->tr.limit = 0xffff;
3726 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
3728 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
3729 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
3730 DESC_R_MASK | DESC_A_MASK);
3731 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
3732 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3733 DESC_A_MASK);
3734 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
3735 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3736 DESC_A_MASK);
3737 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
3738 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3739 DESC_A_MASK);
3740 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
3741 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3742 DESC_A_MASK);
3743 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
3744 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3745 DESC_A_MASK);
3747 env->eip = 0xfff0;
3748 env->regs[R_EDX] = env->cpuid_version;
3750 env->eflags = 0x2;
3752 /* FPU init */
3753 for (i = 0; i < 8; i++) {
3754 env->fptags[i] = 1;
3756 cpu_set_fpuc(env, 0x37f);
3758 env->mxcsr = 0x1f80;
3759 /* All units are in INIT state. */
3760 env->xstate_bv = 0;
3762 env->pat = 0x0007040600070406ULL;
3763 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
3765 memset(env->dr, 0, sizeof(env->dr));
3766 env->dr[6] = DR6_FIXED_1;
3767 env->dr[7] = DR7_FIXED_1;
3768 cpu_breakpoint_remove_all(s, BP_CPU);
3769 cpu_watchpoint_remove_all(s, BP_CPU);
3771 cr4 = 0;
3772 xcr0 = XSTATE_FP_MASK;
3774 #ifdef CONFIG_USER_ONLY
3775 /* Enable all the features for user-mode. */
3776 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
3777 xcr0 |= XSTATE_SSE_MASK;
3779 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3780 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3781 if (env->features[esa->feature] & esa->bits) {
3782 xcr0 |= 1ull << i;
3786 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
3787 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
3789 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
3790 cr4 |= CR4_FSGSBASE_MASK;
3792 #endif
3794 env->xcr0 = xcr0;
3795 cpu_x86_update_cr4(env, cr4);
3797 /*
3798 * SDM 11.11.5 requires:
3799 * - IA32_MTRR_DEF_TYPE MSR.E = 0
3800 * - IA32_MTRR_PHYSMASKn.V = 0
3801 * All other bits are undefined. For simplification, zero it all.
3802 */
3803 env->mtrr_deftype = 0;
3804 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
3805 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
3807 env->interrupt_injected = -1;
3808 env->exception_injected = -1;
3809 env->nmi_injected = false;
3810 #if !defined(CONFIG_USER_ONLY)
3811 /* We hard-wire the BSP to the first CPU. */
3812 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
3814 s->halted = !cpu_is_bsp(cpu);
3816 if (kvm_enabled()) {
3817 kvm_arch_reset_vcpu(cpu);
3819 else if (hvf_enabled()) {
3820 hvf_reset_vcpu(s);
3822 #endif
3825 #ifndef CONFIG_USER_ONLY
3826 bool cpu_is_bsp(X86CPU *cpu)
3828 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3831 /* TODO: remove me when reset over the QOM tree is implemented */
3832 static void x86_cpu_machine_reset_cb(void *opaque)
3834 X86CPU *cpu = opaque;
3835 cpu_reset(CPU(cpu));
3837 #endif
3839 static void mce_init(X86CPU *cpu)
3841 CPUX86State *cenv = &cpu->env;
3842 unsigned int bank;
3844 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3845 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3846 (CPUID_MCE | CPUID_MCA)) {
3847 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3848 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3849 cenv->mcg_ctl = ~(uint64_t)0;
3850 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3851 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3856 #ifndef CONFIG_USER_ONLY
3857 APICCommonClass *apic_get_class(void)
3859 const char *apic_type = "apic";
3861 /* TODO: in-kernel irqchip for hvf */
3862 if (kvm_apic_in_kernel()) {
3863 apic_type = "kvm-apic";
3864 } else if (xen_enabled()) {
3865 apic_type = "xen-apic";
3868 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3871 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
3873 APICCommonState *apic;
3874 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
3876 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
3878 object_property_add_child(OBJECT(cpu), "lapic",
3879 OBJECT(cpu->apic_state), &error_abort);
3880 object_unref(OBJECT(cpu->apic_state));
3882 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
3883 /* TODO: convert to link<> */
3884 apic = APIC_COMMON(cpu->apic_state);
3885 apic->cpu = cpu;
3886 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
3889 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3891 APICCommonState *apic;
3892 static bool apic_mmio_map_once;
3894 if (cpu->apic_state == NULL) {
3895 return;
3897 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
3898 errp);
3900 /* Map APIC MMIO area */
3901 apic = APIC_COMMON(cpu->apic_state);
3902 if (!apic_mmio_map_once) {
3903 memory_region_add_subregion_overlap(get_system_memory(),
3904 apic->apicbase &
3905 MSR_IA32_APICBASE_BASE,
3906 &apic->io_memory,
3907 0x1000);
3908 apic_mmio_map_once = true;
3912 static void x86_cpu_machine_done(Notifier *n, void *unused)
3914 X86CPU *cpu = container_of(n, X86CPU, machine_done);
3915 MemoryRegion *smram =
3916 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
3918 if (smram) {
3919 cpu->smram = g_new(MemoryRegion, 1);
3920 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
3921 smram, 0, 1ull << 32);
3922 memory_region_set_enabled(cpu->smram, true);
3923 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
3926 #else
3927 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3930 #endif
3932 /* Note: Only safe for use on x86(-64) hosts */
3933 static uint32_t x86_host_phys_bits(void)
3935 uint32_t eax;
3936 uint32_t host_phys_bits;
3938 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3939 if (eax >= 0x80000008) {
3940 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3941 /* Note: According to AMD doc 25481 rev 2.34, there is a field
3942 * at bits 23:16 that can specify the maximum physical address bits for
3943 * the guest and can override this value; but I've not seen
3944 * anything with that set.
3945 */
3946 host_phys_bits = eax & 0xff;
3947 } else {
3948 /* It's an odd 64-bit machine that doesn't have the leaf for
3949 * physical address bits; fall back to 36, which matches most older
3950 * Intel parts.
3951 */
3952 host_phys_bits = 36;
3955 return host_phys_bits;
3958 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3960 if (*min < value) {
3961 *min = value;
3965 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3966 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3968 CPUX86State *env = &cpu->env;
3969 FeatureWordInfo *fi = &feature_word_info[w];
3970 uint32_t eax = fi->cpuid_eax;
3971 uint32_t region = eax & 0xF0000000;
3973 if (!env->features[w]) {
3974 return;
3977 switch (region) {
3978 case 0x00000000:
3979 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3980 break;
3981 case 0x80000000:
3982 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3983 break;
3984 case 0xC0000000:
3985 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3986 break;
3990 /* Calculate XSAVE components based on the configured CPU feature flags */
3991 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3993 CPUX86State *env = &cpu->env;
3994 int i;
3995 uint64_t mask;
3997 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3998 return;
4001 mask = 0;
4002 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4003 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4004 if (env->features[esa->feature] & esa->bits) {
4005 mask |= (1ULL << i);
4009 env->features[FEAT_XSAVE_COMP_LO] = mask;
4010 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
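/* Illustrative example: for a CPU model with x87, SSE, AVX and the three
 * AVX-512 state components enabled, the loop above sets bits 0, 1, 2, 5,
 * 6 and 7, so FEAT_XSAVE_COMP_LO becomes 0xE7 and FEAT_XSAVE_COMP_HI 0.
 */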
4013 /***** Steps involved in loading and filtering CPUID data
4015 * When initializing and realizing a CPU object, the steps
4016 * involved in setting up CPUID data are:
4018 * 1) Loading CPU model definition (X86CPUDefinition). This is
4019 * implemented by x86_cpu_load_def() and should be completely
4020 * transparent, as it is done automatically by instance_init.
4021 * No code should need to look at X86CPUDefinition structs
4022 * outside instance_init.
4024 * 2) CPU expansion. This is done by realize before CPUID
4025 * filtering, and will make sure host/accelerator data is
4026 * loaded for CPU models that depend on host capabilities
4027 * (e.g. "host"). Done by x86_cpu_expand_features().
4029 * 3) CPUID filtering. This initializes extra data related to
4030 * CPUID, and checks if the host supports all capabilities
4031 * required by the CPU. Runnability of a CPU model is
4032 * determined at this step. Done by x86_cpu_filter_features().
4034 * Some operations don't require all steps to be performed.
4035 * More precisely:
4037 * - CPU instance creation (instance_init) will run only CPU
4038 * model loading. CPU expansion can't run at instance_init-time
4039 * because host/accelerator data may be not available yet.
4040 * - CPU realization will perform both CPU model expansion and CPUID
4041 * filtering, and return an error in case one of them fails.
4042 * - query-cpu-definitions needs to run all 3 steps. It needs
4043 * to run CPUID filtering, as the 'unavailable-features'
4044 * field is set based on the filtering results.
4045 * - The query-cpu-model-expansion QMP command only needs to run
4046 * CPU model loading and CPU expansion. It should not filter
4047 * any CPUID data based on host capabilities.
4048 */
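/* Illustrative call order during realize (see x86_cpu_realizefn() below):
 * x86_cpu_expand_features() runs first, then x86_cpu_filter_features(),
 * and only afterwards is the vCPU created with qemu_init_vcpu(), matching
 * steps 2 and 3 above.
 */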
4050 /* Expand CPU configuration data, based on configured features
4051 * and host/accelerator capabilities when appropriate.
4052 */
4053 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4055 CPUX86State *env = &cpu->env;
4056 FeatureWord w;
4057 GList *l;
4058 Error *local_err = NULL;
4060 /* TODO: now that cpu->max_features doesn't overwrite features
4061 * set using QOM properties, we can convert
4062 * plus_features & minus_features to global properties
4063 * inside x86_cpu_parse_featurestr() too.
4064 */
4065 if (cpu->max_features) {
4066 for (w = 0; w < FEATURE_WORDS; w++) {
4067 /* Override only features that weren't set explicitly
4068 * by the user.
4070 env->features[w] |=
4071 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4072 ~env->user_features[w] & \
4073 ~feature_word_info[w].no_autoenable_flags;
4077 for (l = plus_features; l; l = l->next) {
4078 const char *prop = l->data;
4079 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4080 if (local_err) {
4081 goto out;
4085 for (l = minus_features; l; l = l->next) {
4086 const char *prop = l->data;
4087 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4088 if (local_err) {
4089 goto out;
4093 if (!kvm_enabled() || !cpu->expose_kvm) {
4094 env->features[FEAT_KVM] = 0;
4097 x86_cpu_enable_xsave_components(cpu);
4099 /* CPUID[EAX=7,ECX=0].EBX always raises the minimum CPUID level automatically: */
4100 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4101 if (cpu->full_cpuid_auto_level) {
4102 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4103 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4104 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4105 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4106 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4107 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4108 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4109 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4110 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4111 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4112 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4113 /* SVM requires CPUID[0x8000000A] */
4114 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4115 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4118 /* SEV requires CPUID[0x8000001F] */
4119 if (sev_enabled()) {
4120 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4124 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4125 if (env->cpuid_level == UINT32_MAX) {
4126 env->cpuid_level = env->cpuid_min_level;
4128 if (env->cpuid_xlevel == UINT32_MAX) {
4129 env->cpuid_xlevel = env->cpuid_min_xlevel;
4131 if (env->cpuid_xlevel2 == UINT32_MAX) {
4132 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4135 out:
4136 if (local_err != NULL) {
4137 error_propagate(errp, local_err);
4141 /*
4142 * Finishes initialization of CPUID data, filters CPU feature
4143 * words based on host availability of each feature.
4144 *
4145 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4146 */
4147 static int x86_cpu_filter_features(X86CPU *cpu)
4149 CPUX86State *env = &cpu->env;
4150 FeatureWord w;
4151 int rv = 0;
4153 for (w = 0; w < FEATURE_WORDS; w++) {
4154 uint32_t host_feat =
4155 x86_cpu_get_supported_feature_word(w, false);
4156 uint32_t requested_features = env->features[w];
4157 env->features[w] &= host_feat;
4158 cpu->filtered_features[w] = requested_features & ~env->features[w];
4159 if (cpu->filtered_features[w]) {
4160 rv = 1;
4164 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
4165 kvm_enabled()) {
4166 KVMState *s = CPU(cpu)->kvm_state;
4167 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
4168 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
4169 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
4170 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
4171 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
4173 if (!eax_0 ||
4174 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
4175 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
4176 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
4177 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
4178 INTEL_PT_ADDR_RANGES_NUM) ||
4179 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
4180 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
4181 (ecx_0 & INTEL_PT_IP_LIP)) {
4182 /*
4183 * Processor Trace capabilities aren't configurable, so if the
4184 * host can't emulate the capabilities we report on
4185 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
4186 */
4187 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
4188 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
4189 rv = 1;
4193 return rv;
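/* Illustrative example: requesting a flag the accelerator can't provide,
 * e.g. "-cpu qemu64,+avx512f" under plain TCG, clears the bit from
 * env->features[FEAT_7_0_EBX], records it in cpu->filtered_features[],
 * and makes this function return 1, so realize fails when enforce_cpuid
 * is set.
 */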
4196 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4197 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4198 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4199 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4200 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4201 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4202 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4204 CPUState *cs = CPU(dev);
4205 X86CPU *cpu = X86_CPU(dev);
4206 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4207 CPUX86State *env = &cpu->env;
4208 Error *local_err = NULL;
4209 static bool ht_warned;
4211 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4212 char *name = x86_cpu_class_get_model_name(xcc);
4213 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4214 g_free(name);
4215 goto out;
4218 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4219 error_setg(errp, "apic-id property was not initialized properly");
4220 return;
4223 x86_cpu_expand_features(cpu, &local_err);
4224 if (local_err) {
4225 goto out;
4228 if (x86_cpu_filter_features(cpu) &&
4229 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4230 x86_cpu_report_filtered_features(cpu);
4231 if (cpu->enforce_cpuid) {
4232 error_setg(&local_err,
4233 accel_uses_host_cpuid() ?
4234 "Host doesn't support requested features" :
4235 "TCG doesn't support requested features");
4236 goto out;
4240 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4241 * CPUID[1].EDX.
4243 if (IS_AMD_CPU(env)) {
4244 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4245 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4246 & CPUID_EXT2_AMD_ALIASES);
4249 /* For 64-bit systems, think about the number of physical address bits to present.
4250 * Ideally this should be the same as the host; anything other than matching
4251 * the host can cause incorrect guest behaviour.
4252 * QEMU used to pick the magic value of 40 bits, which corresponds to
4253 * consumer AMD devices but nothing else.
4254 */
4255 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4256 if (accel_uses_host_cpuid()) {
4257 uint32_t host_phys_bits = x86_host_phys_bits();
4258 static bool warned;
4260 if (cpu->host_phys_bits) {
4261 /* The user asked for us to use the host physical bits */
4262 cpu->phys_bits = host_phys_bits;
4265 /* Print a warning if the user set it to a value that's not the
4266 * host value.
4268 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4269 !warned) {
4270 warn_report("Host physical bits (%u)"
4271 " does not match phys-bits property (%u)",
4272 host_phys_bits, cpu->phys_bits);
4273 warned = true;
4276 if (cpu->phys_bits &&
4277 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4278 cpu->phys_bits < 32)) {
4279 error_setg(errp, "phys-bits should be between 32 and %u "
4280 " (but is %u)",
4281 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4282 return;
4284 } else {
4285 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4286 error_setg(errp, "TCG only supports phys-bits=%u",
4287 TCG_PHYS_ADDR_BITS);
4288 return;
4291 /* 0 means it was not explicitly set by the user (or by machine
4292 * compat_props or by the host code above). In this case, the default
4293 * is the value used by TCG (40).
4295 if (cpu->phys_bits == 0) {
4296 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4298 } else {
4299 /* For 32-bit systems, don't use the user-set value, but keep
4300 * phys_bits consistent with what we tell the guest.
4301 */
4302 if (cpu->phys_bits != 0) {
4303 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4304 return;
4307 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4308 cpu->phys_bits = 36;
4309 } else {
4310 cpu->phys_bits = 32;
4313 cpu_exec_realizefn(cs, &local_err);
4314 if (local_err != NULL) {
4315 error_propagate(errp, local_err);
4316 return;
4319 #ifndef CONFIG_USER_ONLY
4320 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4322 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4323 x86_cpu_apic_create(cpu, &local_err);
4324 if (local_err != NULL) {
4325 goto out;
4328 #endif
4330 mce_init(cpu);
4332 #ifndef CONFIG_USER_ONLY
4333 if (tcg_enabled()) {
4334 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4335 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4337 /* Outer container... */
4338 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4339 memory_region_set_enabled(cpu->cpu_as_root, true);
4341 /* ... with two regions inside: normal system memory with low
4342 * priority, and...
4344 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4345 get_system_memory(), 0, ~0ull);
4346 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4347 memory_region_set_enabled(cpu->cpu_as_mem, true);
4349 cs->num_ases = 2;
4350 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4351 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4353 /* ... SMRAM with higher priority, linked from /machine/smram. */
4354 cpu->machine_done.notify = x86_cpu_machine_done;
4355 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4357 #endif
4359 qemu_init_vcpu(cs);
4361 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
4362 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
4363 * based on inputs (sockets,cores,threads), it is still better to give
4364 * users a warning.
4365 *
4366 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
4367 * cs->nr_threads hasn't been populated yet and the check is incorrect.
4368 */
4369 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
4370 error_report("AMD CPU doesn't support hyperthreading. Please configure"
4371 " -smp options properly.");
4372 ht_warned = true;
4375 x86_cpu_apic_realize(cpu, &local_err);
4376 if (local_err != NULL) {
4377 goto out;
4379 cpu_reset(cs);
4381 xcc->parent_realize(dev, &local_err);
4383 out:
4384 if (local_err != NULL) {
4385 error_propagate(errp, local_err);
4386 return;
4390 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4392 X86CPU *cpu = X86_CPU(dev);
4393 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4394 Error *local_err = NULL;
4396 #ifndef CONFIG_USER_ONLY
4397 cpu_remove_sync(CPU(dev));
4398 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
4399 #endif
4401 if (cpu->apic_state) {
4402 object_unparent(OBJECT(cpu->apic_state));
4403 cpu->apic_state = NULL;
4406 xcc->parent_unrealize(dev, &local_err);
4407 if (local_err != NULL) {
4408 error_propagate(errp, local_err);
4409 return;
4413 typedef struct BitProperty {
4414 FeatureWord w;
4415 uint32_t mask;
4416 } BitProperty;
4418 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
4419 void *opaque, Error **errp)
4421 X86CPU *cpu = X86_CPU(obj);
4422 BitProperty *fp = opaque;
4423 uint32_t f = cpu->env.features[fp->w];
4424 bool value = (f & fp->mask) == fp->mask;
4425 visit_type_bool(v, name, &value, errp);
4428 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
4429 void *opaque, Error **errp)
4431 DeviceState *dev = DEVICE(obj);
4432 X86CPU *cpu = X86_CPU(obj);
4433 BitProperty *fp = opaque;
4434 Error *local_err = NULL;
4435 bool value;
4437 if (dev->realized) {
4438 qdev_prop_set_after_realize(dev, name, errp);
4439 return;
4442 visit_type_bool(v, name, &value, &local_err);
4443 if (local_err) {
4444 error_propagate(errp, local_err);
4445 return;
4448 if (value) {
4449 cpu->env.features[fp->w] |= fp->mask;
4450 } else {
4451 cpu->env.features[fp->w] &= ~fp->mask;
4453 cpu->env.user_features[fp->w] |= fp->mask;
4456 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
4457 void *opaque)
4459 BitProperty *prop = opaque;
4460 g_free(prop);
4463 /* Register a boolean property to get/set a single bit in a uint32_t field.
4465 * The same property name can be registered multiple times to make it affect
4466 * multiple bits in the same FeatureWord. In that case, the getter will return
4467 * true only if all bits are set.
4468 */
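/* Usage sketch (illustrative): once registered, each feature bit is an
 * ordinary QOM bool property, so e.g.
 *   object_property_set_bool(OBJECT(cpu), true, "avx", &error_fatal);
 * (or "-cpu qemu64,avx=on") lands in x86_cpu_set_bit_prop(), which ORs the
 * bit into env->features[FEAT_1_ECX] and records it in
 * env->user_features[] so later auto-enabling won't override the explicit
 * setting.
 */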
4469 static void x86_cpu_register_bit_prop(X86CPU *cpu,
4470 const char *prop_name,
4471 FeatureWord w,
4472 int bitnr)
4474 BitProperty *fp;
4475 ObjectProperty *op;
4476 uint32_t mask = (1UL << bitnr);
4478 op = object_property_find(OBJECT(cpu), prop_name, NULL);
4479 if (op) {
4480 fp = op->opaque;
4481 assert(fp->w == w);
4482 fp->mask |= mask;
4483 } else {
4484 fp = g_new0(BitProperty, 1);
4485 fp->w = w;
4486 fp->mask = mask;
4487 object_property_add(OBJECT(cpu), prop_name, "bool",
4488 x86_cpu_get_bit_prop,
4489 x86_cpu_set_bit_prop,
4490 x86_cpu_release_bit_prop, fp, &error_abort);
4494 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
4495 FeatureWord w,
4496 int bitnr)
4498 FeatureWordInfo *fi = &feature_word_info[w];
4499 const char *name = fi->feat_names[bitnr];
4501 if (!name) {
4502 return;
4505 /* Property names should use "-" instead of "_".
4506 * Old names containing underscores are registered as aliases
4507 * using object_property_add_alias()
4509 assert(!strchr(name, '_'));
4510 /* aliases don't use "|" delimiters anymore, they are registered
4511 * manually using object_property_add_alias() */
4512 assert(!strchr(name, '|'));
4513 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
4516 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
4518 X86CPU *cpu = X86_CPU(cs);
4519 CPUX86State *env = &cpu->env;
4520 GuestPanicInformation *panic_info = NULL;
4522 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
4523 panic_info = g_malloc0(sizeof(GuestPanicInformation));
4525 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
4527 assert(HV_CRASH_PARAMS >= 5);
4528 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
4529 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
4530 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
4531 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
4532 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
4535 return panic_info;
4537 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
4538 const char *name, void *opaque,
4539 Error **errp)
4541 CPUState *cs = CPU(obj);
4542 GuestPanicInformation *panic_info;
4544 if (!cs->crash_occurred) {
4545 error_setg(errp, "No crash occurred");
4546 return;
4549 panic_info = x86_cpu_get_crash_info(cs);
4550 if (panic_info == NULL) {
4551 error_setg(errp, "No crash information");
4552 return;
4555 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
4556 errp);
4557 qapi_free_GuestPanicInformation(panic_info);
4560 static void x86_cpu_initfn(Object *obj)
4562 CPUState *cs = CPU(obj);
4563 X86CPU *cpu = X86_CPU(obj);
4564 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
4565 CPUX86State *env = &cpu->env;
4566 FeatureWord w;
4568 cs->env_ptr = env;
4570 object_property_add(obj, "family", "int",
4571 x86_cpuid_version_get_family,
4572 x86_cpuid_version_set_family, NULL, NULL, NULL);
4573 object_property_add(obj, "model", "int",
4574 x86_cpuid_version_get_model,
4575 x86_cpuid_version_set_model, NULL, NULL, NULL);
4576 object_property_add(obj, "stepping", "int",
4577 x86_cpuid_version_get_stepping,
4578 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
4579 object_property_add_str(obj, "vendor",
4580 x86_cpuid_get_vendor,
4581 x86_cpuid_set_vendor, NULL);
4582 object_property_add_str(obj, "model-id",
4583 x86_cpuid_get_model_id,
4584 x86_cpuid_set_model_id, NULL);
4585 object_property_add(obj, "tsc-frequency", "int",
4586 x86_cpuid_get_tsc_freq,
4587 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
4588 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
4589 x86_cpu_get_feature_words,
4590 NULL, NULL, (void *)env->features, NULL);
4591 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
4592 x86_cpu_get_feature_words,
4593 NULL, NULL, (void *)cpu->filtered_features, NULL);
4595 object_property_add(obj, "crash-information", "GuestPanicInformation",
4596 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
4598 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
4600 for (w = 0; w < FEATURE_WORDS; w++) {
4601 int bitnr;
4603 for (bitnr = 0; bitnr < 32; bitnr++) {
4604 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
4608 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
4609 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
4610 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
4611 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
4612 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
4613 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
4614 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
4616 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
4617 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
4618 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
4619 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
4620 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
4621 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
4622 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
4623 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
4624 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
4625 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
4626 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
4627 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
4628 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
4629 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
4630 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
4631 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
4632 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
4633 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
4634 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
4635 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
4636 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
4638 if (xcc->cpu_def) {
4639 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
4643 static int64_t x86_cpu_get_arch_id(CPUState *cs)
4645 X86CPU *cpu = X86_CPU(cs);
4647 return cpu->apic_id;
4650 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
4652 X86CPU *cpu = X86_CPU(cs);
4654 return cpu->env.cr[0] & CR0_PG_MASK;
4657 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
4659 X86CPU *cpu = X86_CPU(cs);
4661 cpu->env.eip = value;
4664 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
4666 X86CPU *cpu = X86_CPU(cs);
4668 cpu->env.eip = tb->pc - tb->cs_base;
4671 static bool x86_cpu_has_work(CPUState *cs)
4673 X86CPU *cpu = X86_CPU(cs);
4674 CPUX86State *env = &cpu->env;
4676 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
4677 CPU_INTERRUPT_POLL)) &&
4678 (env->eflags & IF_MASK)) ||
4679 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
4680 CPU_INTERRUPT_INIT |
4681 CPU_INTERRUPT_SIPI |
4682 CPU_INTERRUPT_MCE)) ||
4683 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
4684 !(env->hflags & HF_SMM_MASK));
4687 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
4689 X86CPU *cpu = X86_CPU(cs);
4690 CPUX86State *env = &cpu->env;
4692 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
4693 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
4694 : bfd_mach_i386_i8086);
4695 info->print_insn = print_insn_i386;
4697 info->cap_arch = CS_ARCH_X86;
4698 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
4699 : env->hflags & HF_CS32_MASK ? CS_MODE_32
4700 : CS_MODE_16);
4701 info->cap_insn_unit = 1;
4702 info->cap_insn_split = 8;
4705 void x86_update_hflags(CPUX86State *env)
4707 uint32_t hflags;
4708 #define HFLAG_COPY_MASK \
4709 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
4710 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
4711 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
4712 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
4714 hflags = env->hflags & HFLAG_COPY_MASK;
4715 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
4716 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
4717 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
4718 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
4719 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
4721 if (env->cr[4] & CR4_OSFXSR_MASK) {
4722 hflags |= HF_OSFXSR_MASK;
4725 if (env->efer & MSR_EFER_LMA) {
4726 hflags |= HF_LMA_MASK;
4729 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
4730 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
4731 } else {
4732 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
4733 (DESC_B_SHIFT - HF_CS32_SHIFT);
4734 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
4735 (DESC_B_SHIFT - HF_SS32_SHIFT);
4736 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
4737 !(hflags & HF_CS32_MASK)) {
4738 hflags |= HF_ADDSEG_MASK;
4739 } else {
4740 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
4741 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
4744 env->hflags = hflags;
4747 static Property x86_cpu_properties[] = {
4748 #ifdef CONFIG_USER_ONLY
4749 /* apic_id = 0 by default for *-user, see commit 9886e834 */
4750 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
4751 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
4752 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
4753 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
4754 #else
4755 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
4756 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
4757 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
4758 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
4759 #endif
4760 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
4761 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
4762 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
4763 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
4764 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
4765 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
4766 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
4767 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
4768 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
4769 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
4770 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
4771 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
4772 DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
4773 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
4774 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
4775 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
4776 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
4777 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
4778 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
4779 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
4780 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
4781 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
4782 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
4783 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
4784 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
4785 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
4786 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
4787 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
4788 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
4789 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
4790 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
4791 false),
4792 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
4793 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
4795 /*
4796 * From "Requirements for Implementing the Microsoft
4797 * Hypervisor Interface":
4798 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
4799 *
4800 * "Starting with Windows Server 2012 and Windows 8, if
4801 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
4802 * the hypervisor imposes no specific limit to the number of VPs.
4803 * In this case, Windows Server 2012 guest VMs may use more than
4804 * 64 VPs, up to the maximum supported number of processors applicable
4805 * to the specific Windows version being used."
4806 */
4807 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
4808 DEFINE_PROP_END_OF_LIST()
4811 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
4813 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4814 CPUClass *cc = CPU_CLASS(oc);
4815 DeviceClass *dc = DEVICE_CLASS(oc);
4817 device_class_set_parent_realize(dc, x86_cpu_realizefn,
4818 &xcc->parent_realize);
4819 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
4820 &xcc->parent_unrealize);
4821 dc->props = x86_cpu_properties;
4823 xcc->parent_reset = cc->reset;
4824 cc->reset = x86_cpu_reset;
4825 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
4827 cc->class_by_name = x86_cpu_class_by_name;
4828 cc->parse_features = x86_cpu_parse_featurestr;
4829 cc->has_work = x86_cpu_has_work;
4830 #ifdef CONFIG_TCG
4831 cc->do_interrupt = x86_cpu_do_interrupt;
4832 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
4833 #endif
4834 cc->dump_state = x86_cpu_dump_state;
4835 cc->get_crash_info = x86_cpu_get_crash_info;
4836 cc->set_pc = x86_cpu_set_pc;
4837 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
4838 cc->gdb_read_register = x86_cpu_gdb_read_register;
4839 cc->gdb_write_register = x86_cpu_gdb_write_register;
4840 cc->get_arch_id = x86_cpu_get_arch_id;
4841 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
4842 #ifdef CONFIG_USER_ONLY
4843 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
4844 #else
4845 cc->asidx_from_attrs = x86_asidx_from_attrs;
4846 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
4847 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
4848 cc->write_elf64_note = x86_cpu_write_elf64_note;
4849 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
4850 cc->write_elf32_note = x86_cpu_write_elf32_note;
4851 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
4852 cc->vmsd = &vmstate_x86_cpu;
4853 #endif
4854 cc->gdb_arch_name = x86_gdb_arch_name;
4855 #ifdef TARGET_X86_64
4856 cc->gdb_core_xml_file = "i386-64bit.xml";
4857 cc->gdb_num_core_regs = 57;
4858 #else
4859 cc->gdb_core_xml_file = "i386-32bit.xml";
4860 cc->gdb_num_core_regs = 41;
4861 #endif
4862 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
4863 cc->debug_excp_handler = breakpoint_handler;
4864 #endif
4865 cc->cpu_exec_enter = x86_cpu_exec_enter;
4866 cc->cpu_exec_exit = x86_cpu_exec_exit;
4867 #ifdef CONFIG_TCG
4868 cc->tcg_initialize = tcg_x86_init;
4869 #endif
4870 cc->disas_set_info = x86_disas_set_info;
4872 dc->user_creatable = true;
4875 static const TypeInfo x86_cpu_type_info = {
4876 .name = TYPE_X86_CPU,
4877 .parent = TYPE_CPU,
4878 .instance_size = sizeof(X86CPU),
4879 .instance_init = x86_cpu_initfn,
4880 .abstract = true,
4881 .class_size = sizeof(X86CPUClass),
4882 .class_init = x86_cpu_common_class_init,
4886 /* "base" CPU model, used by query-cpu-model-expansion */
4887 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4889 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4891 xcc->static_model = true;
4892 xcc->migration_safe = true;
4893 xcc->model_description = "base CPU model type with no features enabled";
4894 xcc->ordering = 8;
4897 static const TypeInfo x86_base_cpu_type_info = {
4898 .name = X86_CPU_TYPE_NAME("base"),
4899 .parent = TYPE_X86_CPU,
4900 .class_init = x86_cpu_base_class_init,
4903 static void x86_cpu_register_types(void)
4905 int i;
4907 type_register_static(&x86_cpu_type_info);
4908 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
4909 x86_register_cpudef_type(&builtin_x86_defs[i]);
4911 type_register_static(&max_x86_cpu_type_info);
4912 type_register_static(&x86_base_cpu_type_info);
4913 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
4914 type_register_static(&host_x86_cpu_type_info);
4915 #endif
4918 type_init(x86_cpu_register_types)