cpu/i386: populate CPUID 0x8000_001F when SEV is active
[qemu/ar7.git] / target/i386/cpu.c
blob 01607dd0463ec1fa2678a14bb84207ed4c1acef9
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
23 #include "cpu.h"
24 #include "exec/exec-all.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/hvf.h"
27 #include "sysemu/cpus.h"
28 #include "kvm_i386.h"
29 #include "sev_i386.h"
31 #include "qemu/error-report.h"
32 #include "qemu/option.h"
33 #include "qemu/config-file.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-visit-misc.h"
36 #include "qapi/qapi-visit-run-state.h"
37 #include "qapi/qmp/qdict.h"
38 #include "qapi/qmp/qerror.h"
39 #include "qapi/visitor.h"
40 #include "qom/qom-qobject.h"
41 #include "sysemu/arch_init.h"
43 #if defined(CONFIG_KVM)
44 #include <linux/kvm_para.h>
45 #endif
47 #include "sysemu/sysemu.h"
48 #include "hw/qdev-properties.h"
49 #include "hw/i386/topology.h"
50 #ifndef CONFIG_USER_ONLY
51 #include "exec/address-spaces.h"
52 #include "hw/hw.h"
53 #include "hw/xen/xen.h"
54 #include "hw/i386/apic_internal.h"
55 #endif
57 #include "disas/capstone.h"
60 /* Cache topology CPUID constants: */
62 /* CPUID Leaf 2 Descriptors */
64 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
65 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
66 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
67 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
70 /* CPUID Leaf 4 constants: */
72 /* EAX: */
73 #define CPUID_4_TYPE_DCACHE 1
74 #define CPUID_4_TYPE_ICACHE 2
75 #define CPUID_4_TYPE_UNIFIED 3
77 #define CPUID_4_LEVEL(l) ((l) << 5)
79 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
80 #define CPUID_4_FULLY_ASSOC (1 << 9)
82 /* EDX: */
83 #define CPUID_4_NO_INVD_SHARING (1 << 0)
84 #define CPUID_4_INCLUSIVE (1 << 1)
85 #define CPUID_4_COMPLEX_IDX (1 << 2)
87 #define ASSOC_FULL 0xFF
89 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
90 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
91 a == 2 ? 0x2 : \
92 a == 4 ? 0x4 : \
93 a == 8 ? 0x6 : \
94 a == 16 ? 0x8 : \
95 a == 32 ? 0xA : \
96 a == 48 ? 0xB : \
97 a == 64 ? 0xC : \
98 a == 96 ? 0xD : \
99 a == 128 ? 0xE : \
100 a == ASSOC_FULL ? 0xF : \
101 0 /* invalid value */)
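/*
 * Worked example of the encoding above (illustrative, not part of the
 * original file): AMD_ENC_ASSOC(8) == 0x6, AMD_ENC_ASSOC(16) == 0x8 and
 * AMD_ENC_ASSOC(ASSOC_FULL) == 0xF, matching the associativity field
 * encoding of CPUID leaf 0x80000006.  Assuming QEMU_BUILD_BUG_ON (pulled in
 * via qemu/osdep.h) may be used at file scope here, the mapping can even be
 * checked at compile time:
 */
QEMU_BUILD_BUG_ON(AMD_ENC_ASSOC(16) != 0x8);   /* illustrative sketch only */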
104 /* Definitions of the hardcoded cache entries we expose: */
106 /* L1 data cache: */
107 #define L1D_LINE_SIZE 64
108 #define L1D_ASSOCIATIVITY 8
109 #define L1D_SETS 64
110 #define L1D_PARTITIONS 1
111 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
112 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
113 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
114 #define L1D_LINES_PER_TAG 1
115 #define L1D_SIZE_KB_AMD 64
116 #define L1D_ASSOCIATIVITY_AMD 2
118 /* L1 instruction cache: */
119 #define L1I_LINE_SIZE 64
120 #define L1I_ASSOCIATIVITY 8
121 #define L1I_SETS 64
122 #define L1I_PARTITIONS 1
123 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
124 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
125 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
126 #define L1I_LINES_PER_TAG 1
127 #define L1I_SIZE_KB_AMD 64
128 #define L1I_ASSOCIATIVITY_AMD 2
130 /* Level 2 unified cache: */
131 #define L2_LINE_SIZE 64
132 #define L2_ASSOCIATIVITY 16
133 #define L2_SETS 4096
134 #define L2_PARTITIONS 1
135 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
136 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
137 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
138 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
139 #define L2_LINES_PER_TAG 1
140 #define L2_SIZE_KB_AMD 512
142 /* Level 3 unified cache: */
143 #define L3_SIZE_KB 0 /* disabled */
144 #define L3_ASSOCIATIVITY 0 /* disabled */
145 #define L3_LINES_PER_TAG 0 /* disabled */
146 #define L3_LINE_SIZE 0 /* disabled */
147 #define L3_N_LINE_SIZE 64
148 #define L3_N_ASSOCIATIVITY 16
149 #define L3_N_SETS 16384
150 #define L3_N_PARTITIONS 1
151 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
152 #define L3_N_LINES_PER_TAG 1
153 #define L3_N_SIZE_KB_AMD 16384
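/*
 * Worked arithmetic for the "Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS"
 * comments above (illustrative):
 *   L1D/L1I:  64 *  8 *    64 * 1 =    32768 B = 32 KiB
 *   L2:       64 * 16 *  4096 * 1 =  4194304 B =  4 MiB
 *   L3 (new): 64 * 16 * 16384 * 1 = 16777216 B = 16 MiB
 */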
155 /* TLB definitions: */
157 #define L1_DTLB_2M_ASSOC 1
158 #define L1_DTLB_2M_ENTRIES 255
159 #define L1_DTLB_4K_ASSOC 1
160 #define L1_DTLB_4K_ENTRIES 255
162 #define L1_ITLB_2M_ASSOC 1
163 #define L1_ITLB_2M_ENTRIES 255
164 #define L1_ITLB_4K_ASSOC 1
165 #define L1_ITLB_4K_ENTRIES 255
167 #define L2_DTLB_2M_ASSOC 0 /* disabled */
168 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
169 #define L2_DTLB_4K_ASSOC 4
170 #define L2_DTLB_4K_ENTRIES 512
172 #define L2_ITLB_2M_ASSOC 0 /* disabled */
173 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
174 #define L2_ITLB_4K_ASSOC 4
175 #define L2_ITLB_4K_ENTRIES 512
179 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
180                                      uint32_t vendor2, uint32_t vendor3)
181 {
182     int i;
183     for (i = 0; i < 4; i++) {
184         dst[i] = vendor1 >> (8 * i);
185         dst[i + 4] = vendor2 >> (8 * i);
186         dst[i + 8] = vendor3 >> (8 * i);
187     }
188     dst[CPUID_VENDOR_SZ] = '\0';
189 }
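/*
 * Usage illustration (not in the original file): CPUID leaf 0 packs the
 * vendor string little-endian into EBX, EDX, ECX, so for "GenuineIntel"
 *   x86_cpu_vendor_words2str(vendor, 0x756e6547, 0x49656e69, 0x6c65746e);
 * rebuilds the 12-character string "GenuineIntel" byte by byte (see
 * host_vendor_fms() below, which passes ebx, edx, ecx in that order).
 */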
191 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
192 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
193 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
194 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
195 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
196 CPUID_PSE36 | CPUID_FXSR)
197 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
198 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
199 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
200 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
201 CPUID_PAE | CPUID_SEP | CPUID_APIC)
203 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
204 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
205 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
206 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
207 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
208 /* partly implemented:
209 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
210 /* missing:
211 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
212 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
213 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
214 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
215 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
216 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
217 /* missing:
218 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
219 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
220 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
221 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
222 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
224 #ifdef TARGET_X86_64
225 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
226 #else
227 #define TCG_EXT2_X86_64_FEATURES 0
228 #endif
230 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
231 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
232 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
233 TCG_EXT2_X86_64_FEATURES)
234 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
235 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
236 #define TCG_EXT4_FEATURES 0
237 #define TCG_SVM_FEATURES 0
238 #define TCG_KVM_FEATURES 0
239 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
240 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
241 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
242 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
243 CPUID_7_0_EBX_ERMS)
244 /* missing:
245 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
246 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
247 CPUID_7_0_EBX_RDSEED */
248 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
249 CPUID_7_0_ECX_LA57)
250 #define TCG_7_0_EDX_FEATURES 0
251 #define TCG_APM_FEATURES 0
252 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
253 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
254 /* missing:
255 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
257 typedef struct FeatureWordInfo {
258 /* feature flags names are taken from "Intel Processor Identification and
259 * the CPUID Instruction" and AMD's "CPUID Specification".
260 * In cases of disagreement between feature naming conventions,
261      * aliases may be added.
262      */
263     const char *feat_names[32];
264 uint32_t cpuid_eax; /* Input EAX for CPUID */
265 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
266 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
267 int cpuid_reg; /* output register (R_* constant) */
268 uint32_t tcg_features; /* Feature flags supported by TCG */
269 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
270 uint32_t migratable_flags; /* Feature flags known to be migratable */
271 } FeatureWordInfo;
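/*
 * How an entry below ties a CPUID leaf to flag names (illustrative): for
 * FEAT_8000_0001_ECX the table records cpuid_eax = 0x80000001 and
 * cpuid_reg = R_ECX, so bit 2 of that register corresponds to
 * feat_names[2] == "svm"; a hypothetical lookup is simply
 * feature_word_info[w].feat_names[bit].
 */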
273 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
274 [FEAT_1_EDX] = {
275 .feat_names = {
276 "fpu", "vme", "de", "pse",
277 "tsc", "msr", "pae", "mce",
278 "cx8", "apic", NULL, "sep",
279 "mtrr", "pge", "mca", "cmov",
280 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
281 NULL, "ds" /* Intel dts */, "acpi", "mmx",
282 "fxsr", "sse", "sse2", "ss",
283 "ht" /* Intel htt */, "tm", "ia64", "pbe",
285 .cpuid_eax = 1, .cpuid_reg = R_EDX,
286 .tcg_features = TCG_FEATURES,
288 [FEAT_1_ECX] = {
289 .feat_names = {
290 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
291 "ds-cpl", "vmx", "smx", "est",
292 "tm2", "ssse3", "cid", NULL,
293 "fma", "cx16", "xtpr", "pdcm",
294 NULL, "pcid", "dca", "sse4.1",
295 "sse4.2", "x2apic", "movbe", "popcnt",
296 "tsc-deadline", "aes", "xsave", "osxsave",
297 "avx", "f16c", "rdrand", "hypervisor",
299 .cpuid_eax = 1, .cpuid_reg = R_ECX,
300 .tcg_features = TCG_EXT_FEATURES,
302 /* Feature names that are already defined on feature_name[] but
303 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
304 * names on feat_names below. They are copied automatically
305      * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
306      */
307     [FEAT_8000_0001_EDX] = {
308 .feat_names = {
309 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
310 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
311 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
312 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
313 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
314 "nx", NULL, "mmxext", NULL /* mmx */,
315 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
316 NULL, "lm", "3dnowext", "3dnow",
318 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
319 .tcg_features = TCG_EXT2_FEATURES,
321 [FEAT_8000_0001_ECX] = {
322 .feat_names = {
323 "lahf-lm", "cmp-legacy", "svm", "extapic",
324 "cr8legacy", "abm", "sse4a", "misalignsse",
325 "3dnowprefetch", "osvw", "ibs", "xop",
326 "skinit", "wdt", NULL, "lwp",
327 "fma4", "tce", NULL, "nodeid-msr",
328 NULL, "tbm", "topoext", "perfctr-core",
329 "perfctr-nb", NULL, NULL, NULL,
330 NULL, NULL, NULL, NULL,
332 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
333 .tcg_features = TCG_EXT3_FEATURES,
335 [FEAT_C000_0001_EDX] = {
336 .feat_names = {
337 NULL, NULL, "xstore", "xstore-en",
338 NULL, NULL, "xcrypt", "xcrypt-en",
339 "ace2", "ace2-en", "phe", "phe-en",
340 "pmm", "pmm-en", NULL, NULL,
341 NULL, NULL, NULL, NULL,
342 NULL, NULL, NULL, NULL,
343 NULL, NULL, NULL, NULL,
344 NULL, NULL, NULL, NULL,
346 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
347 .tcg_features = TCG_EXT4_FEATURES,
349 [FEAT_KVM] = {
350 .feat_names = {
351 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
352 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
353 NULL, "kvm-pv-tlb-flush", NULL, NULL,
354 NULL, NULL, NULL, NULL,
355 NULL, NULL, NULL, NULL,
356 NULL, NULL, NULL, NULL,
357 "kvmclock-stable-bit", NULL, NULL, NULL,
358 NULL, NULL, NULL, NULL,
360 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
361 .tcg_features = TCG_KVM_FEATURES,
363 [FEAT_HYPERV_EAX] = {
364 .feat_names = {
365 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
366 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
367 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
368 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
369 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
370 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
371 NULL, NULL, NULL, NULL,
372 NULL, NULL, NULL, NULL,
373 NULL, NULL, NULL, NULL,
374 NULL, NULL, NULL, NULL,
375 NULL, NULL, NULL, NULL,
377 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
379 [FEAT_HYPERV_EBX] = {
380 .feat_names = {
381 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
382 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
383 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
384 NULL /* hv_create_port */, NULL /* hv_connect_port */,
385 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
386 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
387 NULL, NULL,
388 NULL, NULL, NULL, NULL,
389 NULL, NULL, NULL, NULL,
390 NULL, NULL, NULL, NULL,
391 NULL, NULL, NULL, NULL,
393 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
395 [FEAT_HYPERV_EDX] = {
396 .feat_names = {
397 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
398 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
399 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
400 NULL, NULL,
401 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
402 NULL, NULL, NULL, NULL,
403 NULL, NULL, NULL, NULL,
404 NULL, NULL, NULL, NULL,
405 NULL, NULL, NULL, NULL,
406 NULL, NULL, NULL, NULL,
408 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
410 [FEAT_SVM] = {
411 .feat_names = {
412 "npt", "lbrv", "svm-lock", "nrip-save",
413 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
414 NULL, NULL, "pause-filter", NULL,
415 "pfthreshold", NULL, NULL, NULL,
416 NULL, NULL, NULL, NULL,
417 NULL, NULL, NULL, NULL,
418 NULL, NULL, NULL, NULL,
419 NULL, NULL, NULL, NULL,
421 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
422 .tcg_features = TCG_SVM_FEATURES,
424 [FEAT_7_0_EBX] = {
425 .feat_names = {
426 "fsgsbase", "tsc-adjust", NULL, "bmi1",
427 "hle", "avx2", NULL, "smep",
428 "bmi2", "erms", "invpcid", "rtm",
429 NULL, NULL, "mpx", NULL,
430 "avx512f", "avx512dq", "rdseed", "adx",
431 "smap", "avx512ifma", "pcommit", "clflushopt",
432 "clwb", NULL, "avx512pf", "avx512er",
433 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
435 .cpuid_eax = 7,
436 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
437 .cpuid_reg = R_EBX,
438 .tcg_features = TCG_7_0_EBX_FEATURES,
440 [FEAT_7_0_ECX] = {
441 .feat_names = {
442 NULL, "avx512vbmi", "umip", "pku",
443 "ospke", NULL, "avx512vbmi2", NULL,
444 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
445 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
446 "la57", NULL, NULL, NULL,
447 NULL, NULL, "rdpid", NULL,
448 NULL, NULL, NULL, NULL,
449 NULL, NULL, NULL, NULL,
451 .cpuid_eax = 7,
452 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
453 .cpuid_reg = R_ECX,
454 .tcg_features = TCG_7_0_ECX_FEATURES,
456 [FEAT_7_0_EDX] = {
457 .feat_names = {
458 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
459 NULL, NULL, NULL, NULL,
460 NULL, NULL, NULL, NULL,
461 NULL, NULL, NULL, NULL,
462 NULL, NULL, NULL, NULL,
463 NULL, NULL, NULL, NULL,
464 NULL, NULL, "spec-ctrl", NULL,
465 NULL, NULL, NULL, NULL,
467 .cpuid_eax = 7,
468 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
469 .cpuid_reg = R_EDX,
470 .tcg_features = TCG_7_0_EDX_FEATURES,
472 [FEAT_8000_0007_EDX] = {
473 .feat_names = {
474 NULL, NULL, NULL, NULL,
475 NULL, NULL, NULL, NULL,
476 "invtsc", NULL, NULL, NULL,
477 NULL, NULL, NULL, NULL,
478 NULL, NULL, NULL, NULL,
479 NULL, NULL, NULL, NULL,
480 NULL, NULL, NULL, NULL,
481 NULL, NULL, NULL, NULL,
483 .cpuid_eax = 0x80000007,
484 .cpuid_reg = R_EDX,
485 .tcg_features = TCG_APM_FEATURES,
486 .unmigratable_flags = CPUID_APM_INVTSC,
488 [FEAT_8000_0008_EBX] = {
489 .feat_names = {
490 NULL, NULL, NULL, NULL,
491 NULL, NULL, NULL, NULL,
492 NULL, NULL, NULL, NULL,
493 "ibpb", NULL, NULL, NULL,
494 NULL, NULL, NULL, NULL,
495 NULL, NULL, NULL, NULL,
496 NULL, NULL, NULL, NULL,
497 NULL, NULL, NULL, NULL,
499 .cpuid_eax = 0x80000008,
500 .cpuid_reg = R_EBX,
501 .tcg_features = 0,
502 .unmigratable_flags = 0,
504 [FEAT_XSAVE] = {
505 .feat_names = {
506 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
507 NULL, NULL, NULL, NULL,
508 NULL, NULL, NULL, NULL,
509 NULL, NULL, NULL, NULL,
510 NULL, NULL, NULL, NULL,
511 NULL, NULL, NULL, NULL,
512 NULL, NULL, NULL, NULL,
513 NULL, NULL, NULL, NULL,
515 .cpuid_eax = 0xd,
516 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
517 .cpuid_reg = R_EAX,
518 .tcg_features = TCG_XSAVE_FEATURES,
520 [FEAT_6_EAX] = {
521 .feat_names = {
522 NULL, NULL, "arat", NULL,
523 NULL, NULL, NULL, NULL,
524 NULL, NULL, NULL, NULL,
525 NULL, NULL, NULL, NULL,
526 NULL, NULL, NULL, NULL,
527 NULL, NULL, NULL, NULL,
528 NULL, NULL, NULL, NULL,
529 NULL, NULL, NULL, NULL,
531 .cpuid_eax = 6, .cpuid_reg = R_EAX,
532 .tcg_features = TCG_6_EAX_FEATURES,
534 [FEAT_XSAVE_COMP_LO] = {
535 .cpuid_eax = 0xD,
536 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
537 .cpuid_reg = R_EAX,
538 .tcg_features = ~0U,
539 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
540 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
541 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
542 XSTATE_PKRU_MASK,
544 [FEAT_XSAVE_COMP_HI] = {
545 .cpuid_eax = 0xD,
546 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
547 .cpuid_reg = R_EDX,
548         .tcg_features = ~0U,
549     },
550 };
552 typedef struct X86RegisterInfo32 {
553 /* Name of register */
554 const char *name;
555 /* QAPI enum value register */
556 X86CPURegister32 qapi_enum;
557 } X86RegisterInfo32;
559 #define REGISTER(reg) \
560 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
561 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
562 REGISTER(EAX),
563 REGISTER(ECX),
564 REGISTER(EDX),
565 REGISTER(EBX),
566 REGISTER(ESP),
567 REGISTER(EBP),
568 REGISTER(ESI),
569     REGISTER(EDI),
570 };
571 #undef REGISTER
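/*
 * For reference (illustrative): REGISTER(EAX) above expands to
 *   [R_EAX] = { .name = "EAX", .qapi_enum = X86_CPU_REGISTER32_EAX },
 * so x86_reg_info_32[R_EAX].name is the string "EAX" returned by
 * get_register_name_32() further down.
 */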
573 typedef struct ExtSaveArea {
574 uint32_t feature, bits;
575 uint32_t offset, size;
576 } ExtSaveArea;
578 static const ExtSaveArea x86_ext_save_areas[] = {
579 [XSTATE_FP_BIT] = {
580 /* x87 FP state component is always enabled if XSAVE is supported */
581 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
582 /* x87 state is in the legacy region of the XSAVE area */
583 .offset = 0,
584 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
586 [XSTATE_SSE_BIT] = {
587 /* SSE state component is always enabled if XSAVE is supported */
588 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
589 /* SSE state is in the legacy region of the XSAVE area */
590 .offset = 0,
591 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
593 [XSTATE_YMM_BIT] =
594 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
595 .offset = offsetof(X86XSaveArea, avx_state),
596 .size = sizeof(XSaveAVX) },
597 [XSTATE_BNDREGS_BIT] =
598 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
599 .offset = offsetof(X86XSaveArea, bndreg_state),
600 .size = sizeof(XSaveBNDREG) },
601 [XSTATE_BNDCSR_BIT] =
602 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
603 .offset = offsetof(X86XSaveArea, bndcsr_state),
604 .size = sizeof(XSaveBNDCSR) },
605 [XSTATE_OPMASK_BIT] =
606 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
607 .offset = offsetof(X86XSaveArea, opmask_state),
608 .size = sizeof(XSaveOpmask) },
609 [XSTATE_ZMM_Hi256_BIT] =
610 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
611 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
612 .size = sizeof(XSaveZMM_Hi256) },
613 [XSTATE_Hi16_ZMM_BIT] =
614 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
615 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
616 .size = sizeof(XSaveHi16_ZMM) },
617 [XSTATE_PKRU_BIT] =
618 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
619 .offset = offsetof(X86XSaveArea, pkru_state),
620             .size = sizeof(XSavePKRU) },
621 };
623 static uint32_t xsave_area_size(uint64_t mask)
624 {
625     int i;
626     uint64_t ret = 0;
628     for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
629         const ExtSaveArea *esa = &x86_ext_save_areas[i];
630         if ((mask >> i) & 1) {
631             ret = MAX(ret, esa->offset + esa->size);
632         }
633     }
634     return ret;
635 }
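/*
 * Worked example (illustrative): for mask = XSTATE_FP_MASK | XSTATE_SSE_MASK
 * both components live in the legacy region, so the result is
 * sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader) (the classic
 * 512 + 64 = 576 bytes); adding XSTATE_YMM_MASK grows the area to
 * offsetof(X86XSaveArea, avx_state) + sizeof(XSaveAVX) instead.
 */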
637 static inline bool accel_uses_host_cpuid(void)
638 {
639     return kvm_enabled() || hvf_enabled();
640 }
642 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
643 {
644     return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
645            cpu->env.features[FEAT_XSAVE_COMP_LO];
646 }
648 const char *get_register_name_32(unsigned int reg)
649 {
650     if (reg >= CPU_NB_REGS32) {
651         return NULL;
652     }
653     return x86_reg_info_32[reg].name;
654 }
656 /*
657  * Returns the set of feature flags that are supported and migratable by
658  * QEMU, for a given FeatureWord.
659  */
660 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
661 {
662     FeatureWordInfo *wi = &feature_word_info[w];
663     uint32_t r = 0;
664     int i;
666     for (i = 0; i < 32; i++) {
667         uint32_t f = 1U << i;
669         /* If the feature name is known, it is implicitly considered migratable,
670          * unless it is explicitly set in unmigratable_flags */
671         if ((wi->migratable_flags & f) ||
672             (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
673             r |= f;
674         }
675     }
676     return r;
677 }
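/*
 * Example of the rule above (illustrative): "invtsc" is a named bit in
 * FEAT_8000_0007_EDX but appears in that word's unmigratable_flags
 * (CPUID_APM_INVTSC), so it is not part of the value returned here, while
 * an unnamed (NULL) bit is only included when listed in migratable_flags,
 * as the FEAT_XSAVE_COMP_* words do.
 */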
679 void host_cpuid(uint32_t function, uint32_t count,
680                 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
681 {
682     uint32_t vec[4];
684 #ifdef __x86_64__
685 asm volatile("cpuid"
686 : "=a"(vec[0]), "=b"(vec[1]),
687 "=c"(vec[2]), "=d"(vec[3])
688 : "0"(function), "c"(count) : "cc");
689 #elif defined(__i386__)
690 asm volatile("pusha \n\t"
691 "cpuid \n\t"
692 "mov %%eax, 0(%2) \n\t"
693 "mov %%ebx, 4(%2) \n\t"
694 "mov %%ecx, 8(%2) \n\t"
695 "mov %%edx, 12(%2) \n\t"
696 "popa"
697 : : "a"(function), "c"(count), "S"(vec)
698 : "memory", "cc");
699 #else
700 abort();
701 #endif
703 if (eax)
704 *eax = vec[0];
705 if (ebx)
706 *ebx = vec[1];
707 if (ecx)
708 *ecx = vec[2];
709     if (edx)
710         *edx = vec[3];
711 }
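/*
 * Typical usage sketch (illustrative): probe the host's maximum extended
 * leaf before reading, say, leaf 0x80000008:
 *
 *   uint32_t eax;
 *   host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
 *   if (eax >= 0x80000008) {
 *       host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
 *       // eax & 0xff is the host's physical address bit width
 *   }
 */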
713 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
714 {
715     uint32_t eax, ebx, ecx, edx;
717     host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
718     x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
720     host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
721     if (family) {
722         *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
723     }
724     if (model) {
725         *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
726     }
727     if (stepping) {
728         *stepping = eax & 0x0F;
729     }
730 }
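/*
 * Worked example of the decode above (illustrative): for CPUID.1
 * EAX = 0x000306C3,
 *   family   = ((eax >> 8) & 0xF) + ((eax >> 20) & 0xFF)    = 0x6 + 0x0  = 6
 *   model    = ((eax >> 4) & 0xF) | ((eax & 0xF0000) >> 12) = 0xC | 0x30 = 0x3C (60)
 *   stepping = eax & 0xF                                    = 3
 * i.e. a family 6, model 60 part, matching the Haswell entries further down.
 */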
732 /* CPU class name definitions: */
734 /* Return type name for a given CPU model name
735  * Caller is responsible for freeing the returned string.
736  */
737 static char *x86_cpu_type_name(const char *model_name)
738 {
739     return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
740 }
742 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
743 {
744     ObjectClass *oc;
745     char *typename;
747     if (cpu_model == NULL) {
748         return NULL;
749     }
751     typename = x86_cpu_type_name(cpu_model);
752     oc = object_class_by_name(typename);
753     g_free(typename);
754     return oc;
755 }
757 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
758 {
759     const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
760     assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
761     return g_strndup(class_name,
762                      strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
763 }
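/*
 * Name mapping used by the two helpers above (illustrative; the exact suffix
 * comes from X86_CPU_TYPE_SUFFIX): x86_cpu_type_name("qemu64") appends that
 * suffix to form the QOM class name, and x86_cpu_class_get_model_name()
 * strips it again to recover "qemu64".
 */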
765 struct X86CPUDefinition {
766 const char *name;
767 uint32_t level;
768 uint32_t xlevel;
769 /* vendor is zero-terminated, 12 character ASCII string */
770 char vendor[CPUID_VENDOR_SZ + 1];
771 int family;
772 int model;
773 int stepping;
774 FeatureWordArray features;
775     const char *model_id;
776 };
778 static X86CPUDefinition builtin_x86_defs[] = {
779     {
780         .name = "qemu64",
781 .level = 0xd,
782 .vendor = CPUID_VENDOR_AMD,
783 .family = 6,
784 .model = 6,
785 .stepping = 3,
786 .features[FEAT_1_EDX] =
787 PPRO_FEATURES |
788 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
789 CPUID_PSE36,
790 .features[FEAT_1_ECX] =
791 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
792 .features[FEAT_8000_0001_EDX] =
793 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
794 .features[FEAT_8000_0001_ECX] =
795 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
796 .xlevel = 0x8000000A,
797 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
800 .name = "phenom",
801 .level = 5,
802 .vendor = CPUID_VENDOR_AMD,
803 .family = 16,
804 .model = 2,
805 .stepping = 3,
806 /* Missing: CPUID_HT */
807 .features[FEAT_1_EDX] =
808 PPRO_FEATURES |
809 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
810 CPUID_PSE36 | CPUID_VME,
811 .features[FEAT_1_ECX] =
812 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
813 CPUID_EXT_POPCNT,
814 .features[FEAT_8000_0001_EDX] =
815 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
816 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
817 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
818 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
819 CPUID_EXT3_CR8LEG,
820 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
821 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
822 .features[FEAT_8000_0001_ECX] =
823 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
824 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
825 /* Missing: CPUID_SVM_LBRV */
826 .features[FEAT_SVM] =
827 CPUID_SVM_NPT,
828 .xlevel = 0x8000001A,
829 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
832 .name = "core2duo",
833 .level = 10,
834 .vendor = CPUID_VENDOR_INTEL,
835 .family = 6,
836 .model = 15,
837 .stepping = 11,
838 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
839 .features[FEAT_1_EDX] =
840 PPRO_FEATURES |
841 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
842 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
843 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
844 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
845 .features[FEAT_1_ECX] =
846 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
847 CPUID_EXT_CX16,
848 .features[FEAT_8000_0001_EDX] =
849 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
850 .features[FEAT_8000_0001_ECX] =
851 CPUID_EXT3_LAHF_LM,
852 .xlevel = 0x80000008,
853 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
856 .name = "kvm64",
857 .level = 0xd,
858 .vendor = CPUID_VENDOR_INTEL,
859 .family = 15,
860 .model = 6,
861 .stepping = 1,
862 /* Missing: CPUID_HT */
863 .features[FEAT_1_EDX] =
864 PPRO_FEATURES | CPUID_VME |
865 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
866 CPUID_PSE36,
867 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
868 .features[FEAT_1_ECX] =
869 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
870 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
871 .features[FEAT_8000_0001_EDX] =
872 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
873 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
874 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
875 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
876 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
877         .features[FEAT_8000_0001_ECX] =
878             0,
879 .xlevel = 0x80000008,
880 .model_id = "Common KVM processor"
883 .name = "qemu32",
884 .level = 4,
885 .vendor = CPUID_VENDOR_INTEL,
886 .family = 6,
887 .model = 6,
888 .stepping = 3,
889 .features[FEAT_1_EDX] =
890 PPRO_FEATURES,
891 .features[FEAT_1_ECX] =
892 CPUID_EXT_SSE3,
893 .xlevel = 0x80000004,
894 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
897 .name = "kvm32",
898 .level = 5,
899 .vendor = CPUID_VENDOR_INTEL,
900 .family = 15,
901 .model = 6,
902 .stepping = 1,
903 .features[FEAT_1_EDX] =
904 PPRO_FEATURES | CPUID_VME |
905 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
906 .features[FEAT_1_ECX] =
907 CPUID_EXT_SSE3,
908         .features[FEAT_8000_0001_ECX] =
909             0,
910 .xlevel = 0x80000008,
911 .model_id = "Common 32-bit KVM processor"
914 .name = "coreduo",
915 .level = 10,
916 .vendor = CPUID_VENDOR_INTEL,
917 .family = 6,
918 .model = 14,
919 .stepping = 8,
920 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
921 .features[FEAT_1_EDX] =
922 PPRO_FEATURES | CPUID_VME |
923 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
924 CPUID_SS,
925 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
926 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
927 .features[FEAT_1_ECX] =
928 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
929 .features[FEAT_8000_0001_EDX] =
930 CPUID_EXT2_NX,
931 .xlevel = 0x80000008,
932 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
935 .name = "486",
936 .level = 1,
937 .vendor = CPUID_VENDOR_INTEL,
938 .family = 4,
939 .model = 8,
940 .stepping = 0,
941 .features[FEAT_1_EDX] =
942 I486_FEATURES,
943 .xlevel = 0,
944 .model_id = "",
947 .name = "pentium",
948 .level = 1,
949 .vendor = CPUID_VENDOR_INTEL,
950 .family = 5,
951 .model = 4,
952 .stepping = 3,
953 .features[FEAT_1_EDX] =
954 PENTIUM_FEATURES,
955 .xlevel = 0,
956 .model_id = "",
959 .name = "pentium2",
960 .level = 2,
961 .vendor = CPUID_VENDOR_INTEL,
962 .family = 6,
963 .model = 5,
964 .stepping = 2,
965 .features[FEAT_1_EDX] =
966 PENTIUM2_FEATURES,
967 .xlevel = 0,
968 .model_id = "",
971 .name = "pentium3",
972 .level = 3,
973 .vendor = CPUID_VENDOR_INTEL,
974 .family = 6,
975 .model = 7,
976 .stepping = 3,
977 .features[FEAT_1_EDX] =
978 PENTIUM3_FEATURES,
979 .xlevel = 0,
980 .model_id = "",
983 .name = "athlon",
984 .level = 2,
985 .vendor = CPUID_VENDOR_AMD,
986 .family = 6,
987 .model = 2,
988 .stepping = 3,
989 .features[FEAT_1_EDX] =
990 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
991 CPUID_MCA,
992 .features[FEAT_8000_0001_EDX] =
993 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
994 .xlevel = 0x80000008,
995 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
998 .name = "n270",
999 .level = 10,
1000 .vendor = CPUID_VENDOR_INTEL,
1001 .family = 6,
1002 .model = 28,
1003 .stepping = 2,
1004 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1005 .features[FEAT_1_EDX] =
1006 PPRO_FEATURES |
1007 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1008 CPUID_ACPI | CPUID_SS,
1009 /* Some CPUs got no CPUID_SEP */
1010 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1011 * CPUID_EXT_XTPR */
1012 .features[FEAT_1_ECX] =
1013 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1014 CPUID_EXT_MOVBE,
1015 .features[FEAT_8000_0001_EDX] =
1016 CPUID_EXT2_NX,
1017 .features[FEAT_8000_0001_ECX] =
1018 CPUID_EXT3_LAHF_LM,
1019 .xlevel = 0x80000008,
1020 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1023 .name = "Conroe",
1024 .level = 10,
1025 .vendor = CPUID_VENDOR_INTEL,
1026 .family = 6,
1027 .model = 15,
1028 .stepping = 3,
1029 .features[FEAT_1_EDX] =
1030 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1031 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1032 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1033 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1034 CPUID_DE | CPUID_FP87,
1035 .features[FEAT_1_ECX] =
1036 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1037 .features[FEAT_8000_0001_EDX] =
1038 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1039 .features[FEAT_8000_0001_ECX] =
1040 CPUID_EXT3_LAHF_LM,
1041 .xlevel = 0x80000008,
1042 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1045 .name = "Penryn",
1046 .level = 10,
1047 .vendor = CPUID_VENDOR_INTEL,
1048 .family = 6,
1049 .model = 23,
1050 .stepping = 3,
1051 .features[FEAT_1_EDX] =
1052 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1053 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1054 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1055 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1056 CPUID_DE | CPUID_FP87,
1057 .features[FEAT_1_ECX] =
1058 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1059 CPUID_EXT_SSE3,
1060 .features[FEAT_8000_0001_EDX] =
1061 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1062 .features[FEAT_8000_0001_ECX] =
1063 CPUID_EXT3_LAHF_LM,
1064 .xlevel = 0x80000008,
1065 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1068 .name = "Nehalem",
1069 .level = 11,
1070 .vendor = CPUID_VENDOR_INTEL,
1071 .family = 6,
1072 .model = 26,
1073 .stepping = 3,
1074 .features[FEAT_1_EDX] =
1075 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1076 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1077 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1078 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1079 CPUID_DE | CPUID_FP87,
1080 .features[FEAT_1_ECX] =
1081 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1082 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1083 .features[FEAT_8000_0001_EDX] =
1084 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1085 .features[FEAT_8000_0001_ECX] =
1086 CPUID_EXT3_LAHF_LM,
1087 .xlevel = 0x80000008,
1088 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1091 .name = "Nehalem-IBRS",
1092 .level = 11,
1093 .vendor = CPUID_VENDOR_INTEL,
1094 .family = 6,
1095 .model = 26,
1096 .stepping = 3,
1097 .features[FEAT_1_EDX] =
1098 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1099 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1100 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1101 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1102 CPUID_DE | CPUID_FP87,
1103 .features[FEAT_1_ECX] =
1104 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1105 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1106 .features[FEAT_7_0_EDX] =
1107 CPUID_7_0_EDX_SPEC_CTRL,
1108 .features[FEAT_8000_0001_EDX] =
1109 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1110 .features[FEAT_8000_0001_ECX] =
1111 CPUID_EXT3_LAHF_LM,
1112 .xlevel = 0x80000008,
1113 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1116 .name = "Westmere",
1117 .level = 11,
1118 .vendor = CPUID_VENDOR_INTEL,
1119 .family = 6,
1120 .model = 44,
1121 .stepping = 1,
1122 .features[FEAT_1_EDX] =
1123 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1124 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1125 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1126 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1127 CPUID_DE | CPUID_FP87,
1128 .features[FEAT_1_ECX] =
1129 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1130 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1131 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1132 .features[FEAT_8000_0001_EDX] =
1133 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1134 .features[FEAT_8000_0001_ECX] =
1135 CPUID_EXT3_LAHF_LM,
1136 .features[FEAT_6_EAX] =
1137 CPUID_6_EAX_ARAT,
1138 .xlevel = 0x80000008,
1139 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1142 .name = "Westmere-IBRS",
1143 .level = 11,
1144 .vendor = CPUID_VENDOR_INTEL,
1145 .family = 6,
1146 .model = 44,
1147 .stepping = 1,
1148 .features[FEAT_1_EDX] =
1149 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1150 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1151 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1152 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1153 CPUID_DE | CPUID_FP87,
1154 .features[FEAT_1_ECX] =
1155 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1156 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1157 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1158 .features[FEAT_8000_0001_EDX] =
1159 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1160 .features[FEAT_8000_0001_ECX] =
1161 CPUID_EXT3_LAHF_LM,
1162 .features[FEAT_7_0_EDX] =
1163 CPUID_7_0_EDX_SPEC_CTRL,
1164 .features[FEAT_6_EAX] =
1165 CPUID_6_EAX_ARAT,
1166 .xlevel = 0x80000008,
1167 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1170 .name = "SandyBridge",
1171 .level = 0xd,
1172 .vendor = CPUID_VENDOR_INTEL,
1173 .family = 6,
1174 .model = 42,
1175 .stepping = 1,
1176 .features[FEAT_1_EDX] =
1177 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1178 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1179 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1180 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1181 CPUID_DE | CPUID_FP87,
1182 .features[FEAT_1_ECX] =
1183 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1184 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1185 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1186 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1187 CPUID_EXT_SSE3,
1188 .features[FEAT_8000_0001_EDX] =
1189 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1190 CPUID_EXT2_SYSCALL,
1191 .features[FEAT_8000_0001_ECX] =
1192 CPUID_EXT3_LAHF_LM,
1193 .features[FEAT_XSAVE] =
1194 CPUID_XSAVE_XSAVEOPT,
1195 .features[FEAT_6_EAX] =
1196 CPUID_6_EAX_ARAT,
1197 .xlevel = 0x80000008,
1198 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1201 .name = "SandyBridge-IBRS",
1202 .level = 0xd,
1203 .vendor = CPUID_VENDOR_INTEL,
1204 .family = 6,
1205 .model = 42,
1206 .stepping = 1,
1207 .features[FEAT_1_EDX] =
1208 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1209 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1210 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1211 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1212 CPUID_DE | CPUID_FP87,
1213 .features[FEAT_1_ECX] =
1214 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1215 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1216 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1217 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1218 CPUID_EXT_SSE3,
1219 .features[FEAT_8000_0001_EDX] =
1220 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1221 CPUID_EXT2_SYSCALL,
1222 .features[FEAT_8000_0001_ECX] =
1223 CPUID_EXT3_LAHF_LM,
1224 .features[FEAT_7_0_EDX] =
1225 CPUID_7_0_EDX_SPEC_CTRL,
1226 .features[FEAT_XSAVE] =
1227 CPUID_XSAVE_XSAVEOPT,
1228 .features[FEAT_6_EAX] =
1229 CPUID_6_EAX_ARAT,
1230 .xlevel = 0x80000008,
1231 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1234 .name = "IvyBridge",
1235 .level = 0xd,
1236 .vendor = CPUID_VENDOR_INTEL,
1237 .family = 6,
1238 .model = 58,
1239 .stepping = 9,
1240 .features[FEAT_1_EDX] =
1241 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1242 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1243 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1244 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1245 CPUID_DE | CPUID_FP87,
1246 .features[FEAT_1_ECX] =
1247 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1248 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1249 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1250 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1251 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1252 .features[FEAT_7_0_EBX] =
1253 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1254 CPUID_7_0_EBX_ERMS,
1255 .features[FEAT_8000_0001_EDX] =
1256 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1257 CPUID_EXT2_SYSCALL,
1258 .features[FEAT_8000_0001_ECX] =
1259 CPUID_EXT3_LAHF_LM,
1260 .features[FEAT_XSAVE] =
1261 CPUID_XSAVE_XSAVEOPT,
1262 .features[FEAT_6_EAX] =
1263 CPUID_6_EAX_ARAT,
1264 .xlevel = 0x80000008,
1265 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1268 .name = "IvyBridge-IBRS",
1269 .level = 0xd,
1270 .vendor = CPUID_VENDOR_INTEL,
1271 .family = 6,
1272 .model = 58,
1273 .stepping = 9,
1274 .features[FEAT_1_EDX] =
1275 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1276 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1277 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1278 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1279 CPUID_DE | CPUID_FP87,
1280 .features[FEAT_1_ECX] =
1281 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1282 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1283 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1284 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1285 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1286 .features[FEAT_7_0_EBX] =
1287 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1288 CPUID_7_0_EBX_ERMS,
1289 .features[FEAT_8000_0001_EDX] =
1290 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1291 CPUID_EXT2_SYSCALL,
1292 .features[FEAT_8000_0001_ECX] =
1293 CPUID_EXT3_LAHF_LM,
1294 .features[FEAT_7_0_EDX] =
1295 CPUID_7_0_EDX_SPEC_CTRL,
1296 .features[FEAT_XSAVE] =
1297 CPUID_XSAVE_XSAVEOPT,
1298 .features[FEAT_6_EAX] =
1299 CPUID_6_EAX_ARAT,
1300 .xlevel = 0x80000008,
1301 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1304 .name = "Haswell-noTSX",
1305 .level = 0xd,
1306 .vendor = CPUID_VENDOR_INTEL,
1307 .family = 6,
1308 .model = 60,
1309 .stepping = 1,
1310 .features[FEAT_1_EDX] =
1311 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1312 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1313 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1314 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1315 CPUID_DE | CPUID_FP87,
1316 .features[FEAT_1_ECX] =
1317 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1318 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1319 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1320 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1321 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1322 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1323 .features[FEAT_8000_0001_EDX] =
1324 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1325 CPUID_EXT2_SYSCALL,
1326 .features[FEAT_8000_0001_ECX] =
1327 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1328 .features[FEAT_7_0_EBX] =
1329 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1330 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1331 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1332 .features[FEAT_XSAVE] =
1333 CPUID_XSAVE_XSAVEOPT,
1334 .features[FEAT_6_EAX] =
1335 CPUID_6_EAX_ARAT,
1336 .xlevel = 0x80000008,
1337 .model_id = "Intel Core Processor (Haswell, no TSX)",
1340 .name = "Haswell-noTSX-IBRS",
1341 .level = 0xd,
1342 .vendor = CPUID_VENDOR_INTEL,
1343 .family = 6,
1344 .model = 60,
1345 .stepping = 1,
1346 .features[FEAT_1_EDX] =
1347 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1348 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1349 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1350 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1351 CPUID_DE | CPUID_FP87,
1352 .features[FEAT_1_ECX] =
1353 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1354 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1355 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1356 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1357 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1358 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1359 .features[FEAT_8000_0001_EDX] =
1360 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1361 CPUID_EXT2_SYSCALL,
1362 .features[FEAT_8000_0001_ECX] =
1363 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1364 .features[FEAT_7_0_EDX] =
1365 CPUID_7_0_EDX_SPEC_CTRL,
1366 .features[FEAT_7_0_EBX] =
1367 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1368 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1369 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1370 .features[FEAT_XSAVE] =
1371 CPUID_XSAVE_XSAVEOPT,
1372 .features[FEAT_6_EAX] =
1373 CPUID_6_EAX_ARAT,
1374 .xlevel = 0x80000008,
1375 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1378 .name = "Haswell",
1379 .level = 0xd,
1380 .vendor = CPUID_VENDOR_INTEL,
1381 .family = 6,
1382 .model = 60,
1383 .stepping = 4,
1384 .features[FEAT_1_EDX] =
1385 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1386 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1387 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1388 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1389 CPUID_DE | CPUID_FP87,
1390 .features[FEAT_1_ECX] =
1391 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1392 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1393 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1394 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1395 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1396 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1397 .features[FEAT_8000_0001_EDX] =
1398 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1399 CPUID_EXT2_SYSCALL,
1400 .features[FEAT_8000_0001_ECX] =
1401 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1402 .features[FEAT_7_0_EBX] =
1403 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1404 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1405 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1406 CPUID_7_0_EBX_RTM,
1407 .features[FEAT_XSAVE] =
1408 CPUID_XSAVE_XSAVEOPT,
1409 .features[FEAT_6_EAX] =
1410 CPUID_6_EAX_ARAT,
1411 .xlevel = 0x80000008,
1412 .model_id = "Intel Core Processor (Haswell)",
1415 .name = "Haswell-IBRS",
1416 .level = 0xd,
1417 .vendor = CPUID_VENDOR_INTEL,
1418 .family = 6,
1419 .model = 60,
1420 .stepping = 4,
1421 .features[FEAT_1_EDX] =
1422 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1423 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1424 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1425 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1426 CPUID_DE | CPUID_FP87,
1427 .features[FEAT_1_ECX] =
1428 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1429 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1430 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1431 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1432 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1433 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1434 .features[FEAT_8000_0001_EDX] =
1435 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1436 CPUID_EXT2_SYSCALL,
1437 .features[FEAT_8000_0001_ECX] =
1438 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1439 .features[FEAT_7_0_EDX] =
1440 CPUID_7_0_EDX_SPEC_CTRL,
1441 .features[FEAT_7_0_EBX] =
1442 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1443 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1444 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1445 CPUID_7_0_EBX_RTM,
1446 .features[FEAT_XSAVE] =
1447 CPUID_XSAVE_XSAVEOPT,
1448 .features[FEAT_6_EAX] =
1449 CPUID_6_EAX_ARAT,
1450 .xlevel = 0x80000008,
1451 .model_id = "Intel Core Processor (Haswell, IBRS)",
1454 .name = "Broadwell-noTSX",
1455 .level = 0xd,
1456 .vendor = CPUID_VENDOR_INTEL,
1457 .family = 6,
1458 .model = 61,
1459 .stepping = 2,
1460 .features[FEAT_1_EDX] =
1461 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1462 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1463 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1464 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1465 CPUID_DE | CPUID_FP87,
1466 .features[FEAT_1_ECX] =
1467 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1468 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1469 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1470 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1471 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1472 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1473 .features[FEAT_8000_0001_EDX] =
1474 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1475 CPUID_EXT2_SYSCALL,
1476 .features[FEAT_8000_0001_ECX] =
1477 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1478 .features[FEAT_7_0_EBX] =
1479 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1480 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1481 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1482 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1483 CPUID_7_0_EBX_SMAP,
1484 .features[FEAT_XSAVE] =
1485 CPUID_XSAVE_XSAVEOPT,
1486 .features[FEAT_6_EAX] =
1487 CPUID_6_EAX_ARAT,
1488 .xlevel = 0x80000008,
1489 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1492 .name = "Broadwell-noTSX-IBRS",
1493 .level = 0xd,
1494 .vendor = CPUID_VENDOR_INTEL,
1495 .family = 6,
1496 .model = 61,
1497 .stepping = 2,
1498 .features[FEAT_1_EDX] =
1499 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1500 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1501 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1502 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1503 CPUID_DE | CPUID_FP87,
1504 .features[FEAT_1_ECX] =
1505 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1506 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1507 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1508 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1509 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1510 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1511 .features[FEAT_8000_0001_EDX] =
1512 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1513 CPUID_EXT2_SYSCALL,
1514 .features[FEAT_8000_0001_ECX] =
1515 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1516 .features[FEAT_7_0_EDX] =
1517 CPUID_7_0_EDX_SPEC_CTRL,
1518 .features[FEAT_7_0_EBX] =
1519 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1520 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1521 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1522 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1523 CPUID_7_0_EBX_SMAP,
1524 .features[FEAT_XSAVE] =
1525 CPUID_XSAVE_XSAVEOPT,
1526 .features[FEAT_6_EAX] =
1527 CPUID_6_EAX_ARAT,
1528 .xlevel = 0x80000008,
1529 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
1532 .name = "Broadwell",
1533 .level = 0xd,
1534 .vendor = CPUID_VENDOR_INTEL,
1535 .family = 6,
1536 .model = 61,
1537 .stepping = 2,
1538 .features[FEAT_1_EDX] =
1539 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1540 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1541 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1542 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1543 CPUID_DE | CPUID_FP87,
1544 .features[FEAT_1_ECX] =
1545 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1546 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1547 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1548 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1549 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1550 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1551 .features[FEAT_8000_0001_EDX] =
1552 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1553 CPUID_EXT2_SYSCALL,
1554 .features[FEAT_8000_0001_ECX] =
1555 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1556 .features[FEAT_7_0_EBX] =
1557 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1558 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1559 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1560 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1561 CPUID_7_0_EBX_SMAP,
1562 .features[FEAT_XSAVE] =
1563 CPUID_XSAVE_XSAVEOPT,
1564 .features[FEAT_6_EAX] =
1565 CPUID_6_EAX_ARAT,
1566 .xlevel = 0x80000008,
1567 .model_id = "Intel Core Processor (Broadwell)",
1570 .name = "Broadwell-IBRS",
1571 .level = 0xd,
1572 .vendor = CPUID_VENDOR_INTEL,
1573 .family = 6,
1574 .model = 61,
1575 .stepping = 2,
1576 .features[FEAT_1_EDX] =
1577 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1578 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1579 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1580 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1581 CPUID_DE | CPUID_FP87,
1582 .features[FEAT_1_ECX] =
1583 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1584 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1585 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1586 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1587 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1588 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1589 .features[FEAT_8000_0001_EDX] =
1590 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1591 CPUID_EXT2_SYSCALL,
1592 .features[FEAT_8000_0001_ECX] =
1593 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1594 .features[FEAT_7_0_EDX] =
1595 CPUID_7_0_EDX_SPEC_CTRL,
1596 .features[FEAT_7_0_EBX] =
1597 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1598 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1599 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1600 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1601 CPUID_7_0_EBX_SMAP,
1602 .features[FEAT_XSAVE] =
1603 CPUID_XSAVE_XSAVEOPT,
1604 .features[FEAT_6_EAX] =
1605 CPUID_6_EAX_ARAT,
1606 .xlevel = 0x80000008,
1607 .model_id = "Intel Core Processor (Broadwell, IBRS)",
1610 .name = "Skylake-Client",
1611 .level = 0xd,
1612 .vendor = CPUID_VENDOR_INTEL,
1613 .family = 6,
1614 .model = 94,
1615 .stepping = 3,
1616 .features[FEAT_1_EDX] =
1617 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1618 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1619 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1620 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1621 CPUID_DE | CPUID_FP87,
1622 .features[FEAT_1_ECX] =
1623 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1624 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1625 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1626 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1627 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1628 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1629 .features[FEAT_8000_0001_EDX] =
1630 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1631 CPUID_EXT2_SYSCALL,
1632 .features[FEAT_8000_0001_ECX] =
1633 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1634 .features[FEAT_7_0_EBX] =
1635 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1636 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1637 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1638 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1639 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1640 /* Missing: XSAVES (not supported by some Linux versions,
1641 * including v4.1 to v4.12).
1642 * KVM doesn't yet expose any XSAVES state save component,
1643 * and the only one defined in Skylake (processor tracing)
1644          * probably will block migration anyway.
1645          */
1646         .features[FEAT_XSAVE] =
1647 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1648 CPUID_XSAVE_XGETBV1,
1649 .features[FEAT_6_EAX] =
1650 CPUID_6_EAX_ARAT,
1651 .xlevel = 0x80000008,
1652 .model_id = "Intel Core Processor (Skylake)",
1655 .name = "Skylake-Client-IBRS",
1656 .level = 0xd,
1657 .vendor = CPUID_VENDOR_INTEL,
1658 .family = 6,
1659 .model = 94,
1660 .stepping = 3,
1661 .features[FEAT_1_EDX] =
1662 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1663 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1664 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1665 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1666 CPUID_DE | CPUID_FP87,
1667 .features[FEAT_1_ECX] =
1668 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1669 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1670 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1671 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1672 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1673 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1674 .features[FEAT_8000_0001_EDX] =
1675 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1676 CPUID_EXT2_SYSCALL,
1677 .features[FEAT_8000_0001_ECX] =
1678 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1679 .features[FEAT_7_0_EDX] =
1680 CPUID_7_0_EDX_SPEC_CTRL,
1681 .features[FEAT_7_0_EBX] =
1682 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1683 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1684 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1685 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1686 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1687 /* Missing: XSAVES (not supported by some Linux versions,
1688 * including v4.1 to v4.12).
1689 * KVM doesn't yet expose any XSAVES state save component,
1690 * and the only one defined in Skylake (processor tracing)
1691 * probably will block migration anyway.
1692 */
1693 .features[FEAT_XSAVE] =
1694 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1695 CPUID_XSAVE_XGETBV1,
1696 .features[FEAT_6_EAX] =
1697 CPUID_6_EAX_ARAT,
1698 .xlevel = 0x80000008,
1699 .model_id = "Intel Core Processor (Skylake, IBRS)",
1702 .name = "Skylake-Server",
1703 .level = 0xd,
1704 .vendor = CPUID_VENDOR_INTEL,
1705 .family = 6,
1706 .model = 85,
1707 .stepping = 4,
1708 .features[FEAT_1_EDX] =
1709 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1710 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1711 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1712 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1713 CPUID_DE | CPUID_FP87,
1714 .features[FEAT_1_ECX] =
1715 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1716 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1717 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1718 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1719 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1720 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1721 .features[FEAT_8000_0001_EDX] =
1722 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1723 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1724 .features[FEAT_8000_0001_ECX] =
1725 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1726 .features[FEAT_7_0_EBX] =
1727 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1728 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1729 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1730 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1731 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1732 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1733 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1734 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
1735 /* Missing: XSAVES (not supported by some Linux versions,
1736 * including v4.1 to v4.12).
1737 * KVM doesn't yet expose any XSAVES state save component,
1738 * and the only one defined in Skylake (processor tracing)
1739 * probably will block migration anyway.
1740 */
1741 .features[FEAT_XSAVE] =
1742 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1743 CPUID_XSAVE_XGETBV1,
1744 .features[FEAT_6_EAX] =
1745 CPUID_6_EAX_ARAT,
1746 .xlevel = 0x80000008,
1747 .model_id = "Intel Xeon Processor (Skylake)",
1750 .name = "Skylake-Server-IBRS",
1751 .level = 0xd,
1752 .vendor = CPUID_VENDOR_INTEL,
1753 .family = 6,
1754 .model = 85,
1755 .stepping = 4,
1756 .features[FEAT_1_EDX] =
1757 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1758 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1759 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1760 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1761 CPUID_DE | CPUID_FP87,
1762 .features[FEAT_1_ECX] =
1763 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1764 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1765 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1766 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1767 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1768 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1769 .features[FEAT_8000_0001_EDX] =
1770 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1771 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1772 .features[FEAT_8000_0001_ECX] =
1773 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1774 .features[FEAT_7_0_EDX] =
1775 CPUID_7_0_EDX_SPEC_CTRL,
1776 .features[FEAT_7_0_EBX] =
1777 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1778 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1779 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1780 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1781 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1782 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1783 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1784 CPUID_7_0_EBX_AVX512VL,
1785 /* Missing: XSAVES (not supported by some Linux versions,
1786 * including v4.1 to v4.12).
1787 * KVM doesn't yet expose any XSAVES state save component,
1788 * and the only one defined in Skylake (processor tracing)
1789 * probably will block migration anyway.
1790 */
1791 .features[FEAT_XSAVE] =
1792 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1793 CPUID_XSAVE_XGETBV1,
1794 .features[FEAT_6_EAX] =
1795 CPUID_6_EAX_ARAT,
1796 .xlevel = 0x80000008,
1797 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
1800 .name = "Opteron_G1",
1801 .level = 5,
1802 .vendor = CPUID_VENDOR_AMD,
1803 .family = 15,
1804 .model = 6,
1805 .stepping = 1,
1806 .features[FEAT_1_EDX] =
1807 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1808 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1809 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1810 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1811 CPUID_DE | CPUID_FP87,
1812 .features[FEAT_1_ECX] =
1813 CPUID_EXT_SSE3,
1814 .features[FEAT_8000_0001_EDX] =
1815 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1816 .xlevel = 0x80000008,
1817 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1820 .name = "Opteron_G2",
1821 .level = 5,
1822 .vendor = CPUID_VENDOR_AMD,
1823 .family = 15,
1824 .model = 6,
1825 .stepping = 1,
1826 .features[FEAT_1_EDX] =
1827 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1828 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1829 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1830 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1831 CPUID_DE | CPUID_FP87,
1832 .features[FEAT_1_ECX] =
1833 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1834 /* Missing: CPUID_EXT2_RDTSCP */
1835 .features[FEAT_8000_0001_EDX] =
1836 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1837 .features[FEAT_8000_0001_ECX] =
1838 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1839 .xlevel = 0x80000008,
1840 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1843 .name = "Opteron_G3",
1844 .level = 5,
1845 .vendor = CPUID_VENDOR_AMD,
1846 .family = 16,
1847 .model = 2,
1848 .stepping = 3,
1849 .features[FEAT_1_EDX] =
1850 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1851 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1852 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1853 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1854 CPUID_DE | CPUID_FP87,
1855 .features[FEAT_1_ECX] =
1856 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1857 CPUID_EXT_SSE3,
1858 /* Missing: CPUID_EXT2_RDTSCP */
1859 .features[FEAT_8000_0001_EDX] =
1860 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1861 .features[FEAT_8000_0001_ECX] =
1862 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1863 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1864 .xlevel = 0x80000008,
1865 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1868 .name = "Opteron_G4",
1869 .level = 0xd,
1870 .vendor = CPUID_VENDOR_AMD,
1871 .family = 21,
1872 .model = 1,
1873 .stepping = 2,
1874 .features[FEAT_1_EDX] =
1875 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1876 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1877 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1878 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1879 CPUID_DE | CPUID_FP87,
1880 .features[FEAT_1_ECX] =
1881 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1882 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1883 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1884 CPUID_EXT_SSE3,
1885 /* Missing: CPUID_EXT2_RDTSCP */
1886 .features[FEAT_8000_0001_EDX] =
1887 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1888 CPUID_EXT2_SYSCALL,
1889 .features[FEAT_8000_0001_ECX] =
1890 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1891 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1892 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1893 CPUID_EXT3_LAHF_LM,
1894 /* no xsaveopt! */
1895 .xlevel = 0x8000001A,
1896 .model_id = "AMD Opteron 62xx class CPU",
1899 .name = "Opteron_G5",
1900 .level = 0xd,
1901 .vendor = CPUID_VENDOR_AMD,
1902 .family = 21,
1903 .model = 2,
1904 .stepping = 0,
1905 .features[FEAT_1_EDX] =
1906 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1907 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1908 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1909 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1910 CPUID_DE | CPUID_FP87,
1911 .features[FEAT_1_ECX] =
1912 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1913 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1914 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1915 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1916 /* Missing: CPUID_EXT2_RDTSCP */
1917 .features[FEAT_8000_0001_EDX] =
1918 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1919 CPUID_EXT2_SYSCALL,
1920 .features[FEAT_8000_0001_ECX] =
1921 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1922 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1923 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1924 CPUID_EXT3_LAHF_LM,
1925 /* no xsaveopt! */
1926 .xlevel = 0x8000001A,
1927 .model_id = "AMD Opteron 63xx class CPU",
1930 .name = "EPYC",
1931 .level = 0xd,
1932 .vendor = CPUID_VENDOR_AMD,
1933 .family = 23,
1934 .model = 1,
1935 .stepping = 2,
1936 .features[FEAT_1_EDX] =
1937 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
1938 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
1939 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
1940 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
1941 CPUID_VME | CPUID_FP87,
1942 .features[FEAT_1_ECX] =
1943 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
1944 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
1945 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1946 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
1947 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1948 .features[FEAT_8000_0001_EDX] =
1949 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
1950 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
1951 CPUID_EXT2_SYSCALL,
1952 .features[FEAT_8000_0001_ECX] =
1953 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
1954 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
1955 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1956 .features[FEAT_7_0_EBX] =
1957 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
1958 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
1959 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
1960 CPUID_7_0_EBX_SHA_NI,
1961 /* Missing: XSAVES (not supported by some Linux versions,
1962 * including v4.1 to v4.12).
1963 * KVM doesn't yet expose any XSAVES state save component.
1964 */
1965 .features[FEAT_XSAVE] =
1966 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1967 CPUID_XSAVE_XGETBV1,
1968 .features[FEAT_6_EAX] =
1969 CPUID_6_EAX_ARAT,
1970 .xlevel = 0x8000000A,
1971 .model_id = "AMD EPYC Processor",
1974 .name = "EPYC-IBPB",
1975 .level = 0xd,
1976 .vendor = CPUID_VENDOR_AMD,
1977 .family = 23,
1978 .model = 1,
1979 .stepping = 2,
1980 .features[FEAT_1_EDX] =
1981 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
1982 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
1983 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
1984 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
1985 CPUID_VME | CPUID_FP87,
1986 .features[FEAT_1_ECX] =
1987 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
1988 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
1989 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1990 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
1991 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1992 .features[FEAT_8000_0001_EDX] =
1993 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
1994 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
1995 CPUID_EXT2_SYSCALL,
1996 .features[FEAT_8000_0001_ECX] =
1997 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
1998 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
1999 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2000 .features[FEAT_8000_0008_EBX] =
2001 CPUID_8000_0008_EBX_IBPB,
2002 .features[FEAT_7_0_EBX] =
2003 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2004 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2005 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2006 CPUID_7_0_EBX_SHA_NI,
2007 /* Missing: XSAVES (not supported by some Linux versions,
2008 * including v4.1 to v4.12).
2009 * KVM doesn't yet expose any XSAVES state save component.
2010 */
2011 .features[FEAT_XSAVE] =
2012 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2013 CPUID_XSAVE_XGETBV1,
2014 .features[FEAT_6_EAX] =
2015 CPUID_6_EAX_ARAT,
2016 .xlevel = 0x8000000A,
2017 .model_id = "AMD EPYC Processor (with IBPB)",
2021 typedef struct PropValue {
2022 const char *prop, *value;
2023 } PropValue;
2025 /* KVM-specific features that are automatically added/removed
2026 * from all CPU models when KVM is enabled.
2027 */
2028 static PropValue kvm_default_props[] = {
2029 { "kvmclock", "on" },
2030 { "kvm-nopiodelay", "on" },
2031 { "kvm-asyncpf", "on" },
2032 { "kvm-steal-time", "on" },
2033 { "kvm-pv-eoi", "on" },
2034 { "kvmclock-stable-bit", "on" },
2035 { "x2apic", "on" },
2036 { "acpi", "off" },
2037 { "monitor", "off" },
2038 { "svm", "off" },
2039 { NULL, NULL },
2042 /* TCG-specific defaults that override all CPU models when using TCG
2043 */
2044 static PropValue tcg_default_props[] = {
2045 { "vme", "off" },
2046 { NULL, NULL },
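/* Replace the default value of an entry in kvm_default_props at runtime.
 * For example, x86_cpu_load_def() below flips "x2apic" to "off" when the
 * in-kernel irqchip is not available.
 */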
2050 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2052 PropValue *pv;
2053 for (pv = kvm_default_props; pv->prop; pv++) {
2054 if (!strcmp(pv->prop, prop)) {
2055 pv->value = value;
2056 break;
2060 /* It is valid to call this function only for properties that
2061 * are already present in the kvm_default_props table.
2062 */
2063 assert(pv->prop);
2066 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2067 bool migratable_only);
2069 static bool lmce_supported(void)
2071 uint64_t mce_cap = 0;
2073 #ifdef CONFIG_KVM
2074 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2075 return false;
2077 #endif
2079 return !!(mce_cap & MCG_LMCE_P);
2082 #define CPUID_MODEL_ID_SZ 48
2084 /**
2085 * cpu_x86_fill_model_id:
2086 * Get CPUID model ID string from host CPU.
2088 * @str should have at least CPUID_MODEL_ID_SZ bytes
2090 * The function does NOT add a null terminator to the string
2091 * automatically.
2092 */
2093 static int cpu_x86_fill_model_id(char *str)
2095 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2096 int i;
2098 for (i = 0; i < 3; i++) {
2099 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2100 memcpy(str + i * 16 + 0, &eax, 4);
2101 memcpy(str + i * 16 + 4, &ebx, 4);
2102 memcpy(str + i * 16 + 8, &ecx, 4);
2103 memcpy(str + i * 16 + 12, &edx, 4);
2105 return 0;
2108 static Property max_x86_cpu_properties[] = {
2109 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2110 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2111 DEFINE_PROP_END_OF_LIST()
2114 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2116 DeviceClass *dc = DEVICE_CLASS(oc);
2117 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2119 xcc->ordering = 9;
2121 xcc->model_description =
2122 "Enables all features supported by the accelerator in the current host";
2124 dc->props = max_x86_cpu_properties;
2127 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2129 static void max_x86_cpu_initfn(Object *obj)
2131 X86CPU *cpu = X86_CPU(obj);
2132 CPUX86State *env = &cpu->env;
2133 KVMState *s = kvm_state;
2135 /* We can't fill the features array here because we don't know yet if
2136 * "migratable" is true or false.
2138 cpu->max_features = true;
2140 if (accel_uses_host_cpuid()) {
2141 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2142 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2143 int family, model, stepping;
2144 X86CPUDefinition host_cpudef = { };
2145 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2147 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2148 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2150 host_vendor_fms(vendor, &family, &model, &stepping);
2152 cpu_x86_fill_model_id(model_id);
2154 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2155 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2156 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2157 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2158 &error_abort);
2159 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2160 &error_abort);
2162 if (kvm_enabled()) {
2163 env->cpuid_min_level =
2164 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2165 env->cpuid_min_xlevel =
2166 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2167 env->cpuid_min_xlevel2 =
2168 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2169 } else {
2170 env->cpuid_min_level =
2171 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2172 env->cpuid_min_xlevel =
2173 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2174 env->cpuid_min_xlevel2 =
2175 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2178 if (lmce_supported()) {
2179 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2181 } else {
2182 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2183 "vendor", &error_abort);
2184 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2185 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2186 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2187 object_property_set_str(OBJECT(cpu),
2188 "QEMU TCG CPU version " QEMU_HW_VERSION,
2189 "model-id", &error_abort);
2192 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
2195 static const TypeInfo max_x86_cpu_type_info = {
2196 .name = X86_CPU_TYPE_NAME("max"),
2197 .parent = TYPE_X86_CPU,
2198 .instance_init = max_x86_cpu_initfn,
2199 .class_init = max_x86_cpu_class_init,
2202 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2203 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2205 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2207 xcc->host_cpuid_required = true;
2208 xcc->ordering = 8;
2210 if (kvm_enabled()) {
2211 xcc->model_description =
2212 "KVM processor with all supported host features ";
2213 } else if (hvf_enabled()) {
2214 xcc->model_description =
2215 "HVF processor with all supported host features ";
2219 static const TypeInfo host_x86_cpu_type_info = {
2220 .name = X86_CPU_TYPE_NAME("host"),
2221 .parent = X86_CPU_TYPE_NAME("max"),
2222 .class_init = host_x86_cpu_class_init,
2225 #endif
2227 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2229 FeatureWordInfo *f = &feature_word_info[w];
2230 int i;
2232 for (i = 0; i < 32; ++i) {
2233 if ((1UL << i) & mask) {
2234 const char *reg = get_register_name_32(f->cpuid_reg);
2235 assert(reg);
2236 warn_report("%s doesn't support requested feature: "
2237 "CPUID.%02XH:%s%s%s [bit %d]",
2238 accel_uses_host_cpuid() ? "host" : "TCG",
2239 f->cpuid_eax, reg,
2240 f->feat_names[i] ? "." : "",
2241 f->feat_names[i] ? f->feat_names[i] : "", i);
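/* Layout of CPUID[1].EAX ("cpuid_version") used by the accessors below:
 * bits 3..0 stepping, bits 7..4 model, bits 11..8 family,
 * bits 19..16 extended model, bits 27..20 extended family.
 * The effective family adds the extended family when family == 0xf, and
 * the effective model is (extended model << 4) | model.
 */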
2246 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2247 const char *name, void *opaque,
2248 Error **errp)
2250 X86CPU *cpu = X86_CPU(obj);
2251 CPUX86State *env = &cpu->env;
2252 int64_t value;
2254 value = (env->cpuid_version >> 8) & 0xf;
2255 if (value == 0xf) {
2256 value += (env->cpuid_version >> 20) & 0xff;
2258 visit_type_int(v, name, &value, errp);
2261 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2262 const char *name, void *opaque,
2263 Error **errp)
2265 X86CPU *cpu = X86_CPU(obj);
2266 CPUX86State *env = &cpu->env;
2267 const int64_t min = 0;
2268 const int64_t max = 0xff + 0xf;
2269 Error *local_err = NULL;
2270 int64_t value;
2272 visit_type_int(v, name, &value, &local_err);
2273 if (local_err) {
2274 error_propagate(errp, local_err);
2275 return;
2277 if (value < min || value > max) {
2278 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2279 name ? name : "null", value, min, max);
2280 return;
2283 env->cpuid_version &= ~0xff00f00;
2284 if (value > 0x0f) {
2285 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2286 } else {
2287 env->cpuid_version |= value << 8;
2291 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2292 const char *name, void *opaque,
2293 Error **errp)
2295 X86CPU *cpu = X86_CPU(obj);
2296 CPUX86State *env = &cpu->env;
2297 int64_t value;
2299 value = (env->cpuid_version >> 4) & 0xf;
2300 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2301 visit_type_int(v, name, &value, errp);
2304 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2305 const char *name, void *opaque,
2306 Error **errp)
2308 X86CPU *cpu = X86_CPU(obj);
2309 CPUX86State *env = &cpu->env;
2310 const int64_t min = 0;
2311 const int64_t max = 0xff;
2312 Error *local_err = NULL;
2313 int64_t value;
2315 visit_type_int(v, name, &value, &local_err);
2316 if (local_err) {
2317 error_propagate(errp, local_err);
2318 return;
2320 if (value < min || value > max) {
2321 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2322 name ? name : "null", value, min, max);
2323 return;
2326 env->cpuid_version &= ~0xf00f0;
2327 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2330 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2331 const char *name, void *opaque,
2332 Error **errp)
2334 X86CPU *cpu = X86_CPU(obj);
2335 CPUX86State *env = &cpu->env;
2336 int64_t value;
2338 value = env->cpuid_version & 0xf;
2339 visit_type_int(v, name, &value, errp);
2342 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2343 const char *name, void *opaque,
2344 Error **errp)
2346 X86CPU *cpu = X86_CPU(obj);
2347 CPUX86State *env = &cpu->env;
2348 const int64_t min = 0;
2349 const int64_t max = 0xf;
2350 Error *local_err = NULL;
2351 int64_t value;
2353 visit_type_int(v, name, &value, &local_err);
2354 if (local_err) {
2355 error_propagate(errp, local_err);
2356 return;
2358 if (value < min || value > max) {
2359 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2360 name ? name : "null", value, min, max);
2361 return;
2364 env->cpuid_version &= ~0xf;
2365 env->cpuid_version |= value & 0xf;
2368 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2370 X86CPU *cpu = X86_CPU(obj);
2371 CPUX86State *env = &cpu->env;
2372 char *value;
2374 value = g_malloc(CPUID_VENDOR_SZ + 1);
2375 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2376 env->cpuid_vendor3);
2377 return value;
2380 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2381 Error **errp)
2383 X86CPU *cpu = X86_CPU(obj);
2384 CPUX86State *env = &cpu->env;
2385 int i;
2387 if (strlen(value) != CPUID_VENDOR_SZ) {
2388 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2389 return;
2392 env->cpuid_vendor1 = 0;
2393 env->cpuid_vendor2 = 0;
2394 env->cpuid_vendor3 = 0;
2395 for (i = 0; i < 4; i++) {
2396 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
2397 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
2398 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
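/* The loop above packs the string little-endian, e.g. "GenuineIntel" gives
 * cpuid_vendor1 = 0x756e6547 ("Genu"), cpuid_vendor2 = 0x49656e69 ("ineI")
 * and cpuid_vendor3 = 0x6c65746e ("ntel"), as reported in CPUID.0 EBX/EDX/ECX.
 */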
2402 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
2404 X86CPU *cpu = X86_CPU(obj);
2405 CPUX86State *env = &cpu->env;
2406 char *value;
2407 int i;
2409 value = g_malloc(48 + 1);
2410 for (i = 0; i < 48; i++) {
2411 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
2413 value[48] = '\0';
2414 return value;
2417 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
2418 Error **errp)
2420 X86CPU *cpu = X86_CPU(obj);
2421 CPUX86State *env = &cpu->env;
2422 int c, len, i;
2424 if (model_id == NULL) {
2425 model_id = "";
2427 len = strlen(model_id);
2428 memset(env->cpuid_model, 0, 48);
2429 for (i = 0; i < 48; i++) {
2430 if (i >= len) {
2431 c = '\0';
2432 } else {
2433 c = (uint8_t)model_id[i];
2435 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
2439 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
2440 void *opaque, Error **errp)
2442 X86CPU *cpu = X86_CPU(obj);
2443 int64_t value;
2445 value = cpu->env.tsc_khz * 1000;
2446 visit_type_int(v, name, &value, errp);
2449 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
2450 void *opaque, Error **errp)
2452 X86CPU *cpu = X86_CPU(obj);
2453 const int64_t min = 0;
2454 const int64_t max = INT64_MAX;
2455 Error *local_err = NULL;
2456 int64_t value;
2458 visit_type_int(v, name, &value, &local_err);
2459 if (local_err) {
2460 error_propagate(errp, local_err);
2461 return;
2463 if (value < min || value > max) {
2464 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2465 name ? name : "null", value, min, max);
2466 return;
2469 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
2472 /* Generic getter for "feature-words" and "filtered-features" properties */
2473 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2474 const char *name, void *opaque,
2475 Error **errp)
2477 uint32_t *array = (uint32_t *)opaque;
2478 FeatureWord w;
2479 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
2480 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
2481 X86CPUFeatureWordInfoList *list = NULL;
2483 for (w = 0; w < FEATURE_WORDS; w++) {
2484 FeatureWordInfo *wi = &feature_word_info[w];
2485 X86CPUFeatureWordInfo *qwi = &word_infos[w];
2486 qwi->cpuid_input_eax = wi->cpuid_eax;
2487 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
2488 qwi->cpuid_input_ecx = wi->cpuid_ecx;
2489 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
2490 qwi->features = array[w];
2492 /* List will be in reverse order, but order shouldn't matter */
2493 list_entries[w].next = list;
2494 list_entries[w].value = &word_infos[w];
2495 list = &list_entries[w];
2498 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
2501 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2502 void *opaque, Error **errp)
2504 X86CPU *cpu = X86_CPU(obj);
2505 int64_t value = cpu->hyperv_spinlock_attempts;
2507 visit_type_int(v, name, &value, errp);
2510 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2511 void *opaque, Error **errp)
2513 const int64_t min = 0xFFF;
2514 const int64_t max = UINT_MAX;
2515 X86CPU *cpu = X86_CPU(obj);
2516 Error *err = NULL;
2517 int64_t value;
2519 visit_type_int(v, name, &value, &err);
2520 if (err) {
2521 error_propagate(errp, err);
2522 return;
2525 if (value < min || value > max) {
2526 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2527 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2528 object_get_typename(obj), name ? name : "null",
2529 value, min, max);
2530 return;
2532 cpu->hyperv_spinlock_attempts = value;
2535 static const PropertyInfo qdev_prop_spinlocks = {
2536 .name = "int",
2537 .get = x86_get_hv_spinlocks,
2538 .set = x86_set_hv_spinlocks,
2541 /* Convert all '_' in a feature string option name to '-', so the feature
2542 * name conforms to the QOM property naming rule, which uses '-' instead of '_'.
2543 */
2544 static inline void feat2prop(char *s)
2546 while ((s = strchr(s, '_'))) {
2547 *s = '-';
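/* feat2prop() example: "lahf_lm" becomes "lahf-lm", "tsc_adjust" becomes
 * "tsc-adjust".
 */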
2551 /* Return the feature property name for a feature flag bit */
2552 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2554 /* XSAVE components are automatically enabled by other features,
2555 * so return the original feature name instead
2556 */
2557 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2558 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2560 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2561 x86_ext_save_areas[comp].bits) {
2562 w = x86_ext_save_areas[comp].feature;
2563 bitnr = ctz32(x86_ext_save_areas[comp].bits);
2567 assert(bitnr < 32);
2568 assert(w < FEATURE_WORDS);
2569 return feature_word_info[w].feat_names[bitnr];
2572 /* Compatibility hack to maintain the legacy +-feat semantics,
2573 * where +feat/-feat overwrites any feature set by
2574 * feat=on|feat even if the latter is parsed after +-feat
2575 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
2576 */
2577 static GList *plus_features, *minus_features;
2579 static gint compare_string(gconstpointer a, gconstpointer b)
2581 return g_strcmp0(a, b);
2584 /* Parse "+feature,-feature,feature=foo" CPU feature string
2585 */
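/* Legacy "+foo" and "-foo" items are queued in plus_features/minus_features
 * and applied later during feature expansion; "foo=bar" items (and bare
 * "foo", which implies "foo=on") are registered below as global properties
 * of the CPU type.
 */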
2586 static void x86_cpu_parse_featurestr(const char *typename, char *features,
2587 Error **errp)
2589 char *featurestr; /* Single 'key=value" string being parsed */
2590 static bool cpu_globals_initialized;
2591 bool ambiguous = false;
2593 if (cpu_globals_initialized) {
2594 return;
2596 cpu_globals_initialized = true;
2598 if (!features) {
2599 return;
2602 for (featurestr = strtok(features, ",");
2603 featurestr;
2604 featurestr = strtok(NULL, ",")) {
2605 const char *name;
2606 const char *val = NULL;
2607 char *eq = NULL;
2608 char num[32];
2609 GlobalProperty *prop;
2611 /* Compatibility syntax: */
2612 if (featurestr[0] == '+') {
2613 plus_features = g_list_append(plus_features,
2614 g_strdup(featurestr + 1));
2615 continue;
2616 } else if (featurestr[0] == '-') {
2617 minus_features = g_list_append(minus_features,
2618 g_strdup(featurestr + 1));
2619 continue;
2622 eq = strchr(featurestr, '=');
2623 if (eq) {
2624 *eq++ = 0;
2625 val = eq;
2626 } else {
2627 val = "on";
2630 feat2prop(featurestr);
2631 name = featurestr;
2633 if (g_list_find_custom(plus_features, name, compare_string)) {
2634 warn_report("Ambiguous CPU model string. "
2635 "Don't mix both \"+%s\" and \"%s=%s\"",
2636 name, name, val);
2637 ambiguous = true;
2639 if (g_list_find_custom(minus_features, name, compare_string)) {
2640 warn_report("Ambiguous CPU model string. "
2641 "Don't mix both \"-%s\" and \"%s=%s\"",
2642 name, name, val);
2643 ambiguous = true;
2646 /* Special case: */
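/* "tsc-freq" accepts metric suffixes, e.g. "tsc-freq=2.5G" is parsed as
 * 2500000000 and forwarded as the "tsc-frequency" property.
 */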
2647 if (!strcmp(name, "tsc-freq")) {
2648 int ret;
2649 uint64_t tsc_freq;
2651 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
2652 if (ret < 0 || tsc_freq > INT64_MAX) {
2653 error_setg(errp, "bad numerical value %s", val);
2654 return;
2656 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2657 val = num;
2658 name = "tsc-frequency";
2661 prop = g_new0(typeof(*prop), 1);
2662 prop->driver = typename;
2663 prop->property = g_strdup(name);
2664 prop->value = g_strdup(val);
2665 prop->errp = &error_fatal;
2666 qdev_prop_register_global(prop);
2669 if (ambiguous) {
2670 warn_report("Compatibility of ambiguous CPU model "
2671 "strings won't be kept on future QEMU versions");
2675 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2676 static int x86_cpu_filter_features(X86CPU *cpu);
2678 /* Check for missing features that may prevent the CPU class from
2679 * running using the current machine and accelerator.
2680 */
2681 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2682 strList **missing_feats)
2684 X86CPU *xc;
2685 FeatureWord w;
2686 Error *err = NULL;
2687 strList **next = missing_feats;
2689 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
2690 strList *new = g_new0(strList, 1);
2691 new->value = g_strdup("kvm");
2692 *missing_feats = new;
2693 return;
2696 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2698 x86_cpu_expand_features(xc, &err);
2699 if (err) {
2700 /* Errors at x86_cpu_expand_features should never happen,
2701 * but in case it does, just report the model as not
2702 * runnable at all using the "type" property.
2703 */
2704 strList *new = g_new0(strList, 1);
2705 new->value = g_strdup("type");
2706 *next = new;
2707 next = &new->next;
2710 x86_cpu_filter_features(xc);
2712 for (w = 0; w < FEATURE_WORDS; w++) {
2713 uint32_t filtered = xc->filtered_features[w];
2714 int i;
2715 for (i = 0; i < 32; i++) {
2716 if (filtered & (1UL << i)) {
2717 strList *new = g_new0(strList, 1);
2718 new->value = g_strdup(x86_cpu_feature_name(w, i));
2719 *next = new;
2720 next = &new->next;
2725 object_unref(OBJECT(xc));
2728 /* Print all cpuid feature names in featureset
2729 */
2730 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2732 int bit;
2733 bool first = true;
2735 for (bit = 0; bit < 32; bit++) {
2736 if (featureset[bit]) {
2737 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2738 first = false;
2743 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2744 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2746 ObjectClass *class_a = (ObjectClass *)a;
2747 ObjectClass *class_b = (ObjectClass *)b;
2748 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2749 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2750 const char *name_a, *name_b;
2752 if (cc_a->ordering != cc_b->ordering) {
2753 return cc_a->ordering - cc_b->ordering;
2754 } else {
2755 name_a = object_class_get_name(class_a);
2756 name_b = object_class_get_name(class_b);
2757 return strcmp(name_a, name_b);
2761 static GSList *get_sorted_cpu_model_list(void)
2763 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2764 list = g_slist_sort(list, x86_cpu_list_compare);
2765 return list;
2768 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2770 ObjectClass *oc = data;
2771 X86CPUClass *cc = X86_CPU_CLASS(oc);
2772 CPUListState *s = user_data;
2773 char *name = x86_cpu_class_get_model_name(cc);
2774 const char *desc = cc->model_description;
2775 if (!desc && cc->cpu_def) {
2776 desc = cc->cpu_def->model_id;
2779 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2780 name, desc);
2781 g_free(name);
2784 /* list available CPU models and flags */
2785 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2787 int i;
2788 CPUListState s = {
2789 .file = f,
2790 .cpu_fprintf = cpu_fprintf,
2792 GSList *list;
2794 (*cpu_fprintf)(f, "Available CPUs:\n");
2795 list = get_sorted_cpu_model_list();
2796 g_slist_foreach(list, x86_cpu_list_entry, &s);
2797 g_slist_free(list);
2799 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2800 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2801 FeatureWordInfo *fw = &feature_word_info[i];
2803 (*cpu_fprintf)(f, " ");
2804 listflags(f, cpu_fprintf, fw->feat_names);
2805 (*cpu_fprintf)(f, "\n");
2809 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2811 ObjectClass *oc = data;
2812 X86CPUClass *cc = X86_CPU_CLASS(oc);
2813 CpuDefinitionInfoList **cpu_list = user_data;
2814 CpuDefinitionInfoList *entry;
2815 CpuDefinitionInfo *info;
2817 info = g_malloc0(sizeof(*info));
2818 info->name = x86_cpu_class_get_model_name(cc);
2819 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2820 info->has_unavailable_features = true;
2821 info->q_typename = g_strdup(object_class_get_name(oc));
2822 info->migration_safe = cc->migration_safe;
2823 info->has_migration_safe = true;
2824 info->q_static = cc->static_model;
2826 entry = g_malloc0(sizeof(*entry));
2827 entry->value = info;
2828 entry->next = *cpu_list;
2829 *cpu_list = entry;
2832 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2834 CpuDefinitionInfoList *cpu_list = NULL;
2835 GSList *list = get_sorted_cpu_model_list();
2836 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2837 g_slist_free(list);
2838 return cpu_list;
2841 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2842 bool migratable_only)
2844 FeatureWordInfo *wi = &feature_word_info[w];
2845 uint32_t r;
2847 if (kvm_enabled()) {
2848 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2849 wi->cpuid_ecx,
2850 wi->cpuid_reg);
2851 } else if (hvf_enabled()) {
2852 r = hvf_get_supported_cpuid(wi->cpuid_eax,
2853 wi->cpuid_ecx,
2854 wi->cpuid_reg);
2855 } else if (tcg_enabled()) {
2856 r = wi->tcg_features;
2857 } else {
2858 return ~0;
2860 if (migratable_only) {
2861 r &= x86_cpu_get_migratable_flags(w);
2863 return r;
2866 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2868 FeatureWord w;
2870 for (w = 0; w < FEATURE_WORDS; w++) {
2871 report_unavailable_features(w, cpu->filtered_features[w]);
2875 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2877 PropValue *pv;
2878 for (pv = props; pv->prop; pv++) {
2879 if (!pv->value) {
2880 continue;
2882 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2883 &error_abort);
2887 /* Load data from X86CPUDefinition into a X86CPU object
2888 */
2889 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2891 CPUX86State *env = &cpu->env;
2892 const char *vendor;
2893 char host_vendor[CPUID_VENDOR_SZ + 1];
2894 FeatureWord w;
2896 /* NOTE: any property set by this function should be returned by
2897 * x86_cpu_static_props(), so static expansion of
2898 * query-cpu-model-expansion is always complete.
2899 */
2901 /* CPU models only set _minimum_ values for level/xlevel: */
2902 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
2903 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2905 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2906 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2907 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2908 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2909 for (w = 0; w < FEATURE_WORDS; w++) {
2910 env->features[w] = def->features[w];
2913 /* Special cases not set in the X86CPUDefinition structs: */
2914 /* TODO: in-kernel irqchip for hvf */
2915 if (kvm_enabled()) {
2916 if (!kvm_irqchip_in_kernel()) {
2917 x86_cpu_change_kvm_default("x2apic", "off");
2920 x86_cpu_apply_props(cpu, kvm_default_props);
2921 } else if (tcg_enabled()) {
2922 x86_cpu_apply_props(cpu, tcg_default_props);
2925 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2927 /* sysenter isn't supported in compatibility mode on AMD,
2928 * syscall isn't supported in compatibility mode on Intel.
2929 * Normally we advertise the actual CPU vendor, but you can
2930 * override this using the 'vendor' property if you want to use
2931 * KVM's sysenter/syscall emulation in compatibility mode and
2932 * when doing cross vendor migration
2933 */
2934 vendor = def->vendor;
2935 if (accel_uses_host_cpuid()) {
2936 uint32_t ebx = 0, ecx = 0, edx = 0;
2937 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2938 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2939 vendor = host_vendor;
2942 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2946 /* Return a QDict containing keys for all properties that can be included
2947 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
2948 * must be included in the dictionary.
2949 */
2950 static QDict *x86_cpu_static_props(void)
2952 FeatureWord w;
2953 int i;
2954 static const char *props[] = {
2955 "min-level",
2956 "min-xlevel",
2957 "family",
2958 "model",
2959 "stepping",
2960 "model-id",
2961 "vendor",
2962 "lmce",
2963 NULL,
2965 static QDict *d;
2967 if (d) {
2968 return d;
2971 d = qdict_new();
2972 for (i = 0; props[i]; i++) {
2973 qdict_put_null(d, props[i]);
2976 for (w = 0; w < FEATURE_WORDS; w++) {
2977 FeatureWordInfo *fi = &feature_word_info[w];
2978 int bit;
2979 for (bit = 0; bit < 32; bit++) {
2980 if (!fi->feat_names[bit]) {
2981 continue;
2983 qdict_put_null(d, fi->feat_names[bit]);
2987 return d;
2990 /* Add an entry to @props dict, with the value for property. */
2991 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
2993 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
2994 &error_abort);
2996 qdict_put_obj(props, prop, value);
2999 /* Convert CPU model data from X86CPU object to a property dictionary
3000 * that can recreate exactly the same CPU model.
3001 */
3002 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3004 QDict *sprops = x86_cpu_static_props();
3005 const QDictEntry *e;
3007 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3008 const char *prop = qdict_entry_key(e);
3009 x86_cpu_expand_prop(cpu, props, prop);
3013 /* Convert CPU model data from X86CPU object to a property dictionary
3014 * that can recreate exactly the same CPU model, including every
3015 * writeable QOM property.
3016 */
3017 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3019 ObjectPropertyIterator iter;
3020 ObjectProperty *prop;
3022 object_property_iter_init(&iter, OBJECT(cpu));
3023 while ((prop = object_property_iter_next(&iter))) {
3024 /* skip read-only or write-only properties */
3025 if (!prop->get || !prop->set) {
3026 continue;
3029 /* "hotplugged" is the only property that is configurable
3030 * on the command-line but will be set differently on CPUs
3031 * created using "-cpu ... -smp ..." and by CPUs created
3032 * on the fly by x86_cpu_from_model() for querying. Skip it.
3033 */
3034 if (!strcmp(prop->name, "hotplugged")) {
3035 continue;
3037 x86_cpu_expand_prop(cpu, props, prop->name);
3041 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3043 const QDictEntry *prop;
3044 Error *err = NULL;
3046 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3047 object_property_set_qobject(obj, qdict_entry_value(prop),
3048 qdict_entry_key(prop), &err);
3049 if (err) {
3050 break;
3054 error_propagate(errp, err);
3057 /* Create X86CPU object according to model+props specification */
3058 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3060 X86CPU *xc = NULL;
3061 X86CPUClass *xcc;
3062 Error *err = NULL;
3064 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3065 if (xcc == NULL) {
3066 error_setg(&err, "CPU model '%s' not found", model);
3067 goto out;
3070 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3071 if (props) {
3072 object_apply_props(OBJECT(xc), props, &err);
3073 if (err) {
3074 goto out;
3078 x86_cpu_expand_features(xc, &err);
3079 if (err) {
3080 goto out;
3083 out:
3084 if (err) {
3085 error_propagate(errp, err);
3086 object_unref(OBJECT(xc));
3087 xc = NULL;
3089 return xc;
3092 CpuModelExpansionInfo *
3093 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3094 CpuModelInfo *model,
3095 Error **errp)
3097 X86CPU *xc = NULL;
3098 Error *err = NULL;
3099 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3100 QDict *props = NULL;
3101 const char *base_name;
3103 xc = x86_cpu_from_model(model->name,
3104 model->has_props ?
3105 qobject_to_qdict(model->props) :
3106 NULL, &err);
3107 if (err) {
3108 goto out;
3111 props = qdict_new();
3113 switch (type) {
3114 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3115 /* Static expansion will be based on "base" only */
3116 base_name = "base";
3117 x86_cpu_to_dict(xc, props);
3118 break;
3119 case CPU_MODEL_EXPANSION_TYPE_FULL:
3120 /* As we don't return every single property, full expansion needs
3121 * to keep the original model name+props, and add extra
3122 * properties on top of that.
3123 */
3124 base_name = model->name;
3125 x86_cpu_to_dict_full(xc, props);
3126 break;
3127 default:
3128 error_setg(&err, "Unsupported expansion type");
3129 goto out;
3132 if (!props) {
3133 props = qdict_new();
3135 x86_cpu_to_dict(xc, props);
3137 ret->model = g_new0(CpuModelInfo, 1);
3138 ret->model->name = g_strdup(base_name);
3139 ret->model->props = QOBJECT(props);
3140 ret->model->has_props = true;
3142 out:
3143 object_unref(OBJECT(xc));
3144 if (err) {
3145 error_propagate(errp, err);
3146 qapi_free_CpuModelExpansionInfo(ret);
3147 ret = NULL;
3149 return ret;
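/* Typical QMP usage of arch_query_cpu_model_expansion() above (sketch):
 *   { "execute": "query-cpu-model-expansion",
 *     "arguments": { "type": "static", "model": { "name": "EPYC" } } }
 * Static expansion returns the model name "base" plus the expanded
 * property dictionary; full expansion keeps the original name.
 */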
3152 static gchar *x86_gdb_arch_name(CPUState *cs)
3154 #ifdef TARGET_X86_64
3155 return g_strdup("i386:x86-64");
3156 #else
3157 return g_strdup("i386");
3158 #endif
3161 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3163 X86CPUDefinition *cpudef = data;
3164 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3166 xcc->cpu_def = cpudef;
3167 xcc->migration_safe = true;
3170 static void x86_register_cpudef_type(X86CPUDefinition *def)
3172 char *typename = x86_cpu_type_name(def->name);
3173 TypeInfo ti = {
3174 .name = typename,
3175 .parent = TYPE_X86_CPU,
3176 .class_init = x86_cpu_cpudef_class_init,
3177 .class_data = def,
3180 /* AMD aliases are handled at runtime based on CPUID vendor, so
3181 * they shouldn't be set on the CPU model table.
3182 */
3183 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3184 /* catch mistakes instead of silently truncating model_id when too long */
3185 assert(def->model_id && strlen(def->model_id) <= 48);
3188 type_register(&ti);
3189 g_free(typename);
3192 #if !defined(CONFIG_USER_ONLY)
3194 void cpu_clear_apic_feature(CPUX86State *env)
3196 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3199 #endif /* !CONFIG_USER_ONLY */
3201 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3202 uint32_t *eax, uint32_t *ebx,
3203 uint32_t *ecx, uint32_t *edx)
3205 X86CPU *cpu = x86_env_get_cpu(env);
3206 CPUState *cs = CPU(cpu);
3207 uint32_t pkg_offset;
3208 uint32_t limit;
3209 uint32_t signature[3];
3211 /* Calculate & apply limits for different index ranges */
3212 if (index >= 0xC0000000) {
3213 limit = env->cpuid_xlevel2;
3214 } else if (index >= 0x80000000) {
3215 limit = env->cpuid_xlevel;
3216 } else if (index >= 0x40000000) {
3217 limit = 0x40000001;
3218 } else {
3219 limit = env->cpuid_level;
3222 if (index > limit) {
3223 /* Intel documentation states that invalid EAX input will
3224 * return the same information as EAX=cpuid_level
3225 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3226 */
3227 index = env->cpuid_level;
3230 switch(index) {
3231 case 0:
3232 *eax = env->cpuid_level;
3233 *ebx = env->cpuid_vendor1;
3234 *edx = env->cpuid_vendor2;
3235 *ecx = env->cpuid_vendor3;
3236 break;
3237 case 1:
3238 *eax = env->cpuid_version;
3239 *ebx = (cpu->apic_id << 24) |
3240 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3241 *ecx = env->features[FEAT_1_ECX];
3242 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3243 *ecx |= CPUID_EXT_OSXSAVE;
3245 *edx = env->features[FEAT_1_EDX];
3246 if (cs->nr_cores * cs->nr_threads > 1) {
3247 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3248 *edx |= CPUID_HT;
3250 break;
3251 case 2:
3252 /* cache info: needed for Pentium Pro compatibility */
3253 if (cpu->cache_info_passthrough) {
3254 host_cpuid(index, 0, eax, ebx, ecx, edx);
3255 break;
3257 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3258 *ebx = 0;
3259 if (!cpu->enable_l3_cache) {
3260 *ecx = 0;
3261 } else {
3262 *ecx = L3_N_DESCRIPTOR;
3264 *edx = (L1D_DESCRIPTOR << 16) | \
3265 (L1I_DESCRIPTOR << 8) | \
3266 (L2_DESCRIPTOR);
3267 break;
3268 case 4:
3269 /* cache info: needed for Core compatibility */
3270 if (cpu->cache_info_passthrough) {
3271 host_cpuid(index, count, eax, ebx, ecx, edx);
3272 *eax &= ~0xFC000000;
3273 } else {
3274 *eax = 0;
3275 switch (count) {
3276 case 0: /* L1 dcache info */
3277 *eax |= CPUID_4_TYPE_DCACHE | \
3278 CPUID_4_LEVEL(1) | \
3279 CPUID_4_SELF_INIT_LEVEL;
3280 *ebx = (L1D_LINE_SIZE - 1) | \
3281 ((L1D_PARTITIONS - 1) << 12) | \
3282 ((L1D_ASSOCIATIVITY - 1) << 22);
3283 *ecx = L1D_SETS - 1;
3284 *edx = CPUID_4_NO_INVD_SHARING;
3285 break;
3286 case 1: /* L1 icache info */
3287 *eax |= CPUID_4_TYPE_ICACHE | \
3288 CPUID_4_LEVEL(1) | \
3289 CPUID_4_SELF_INIT_LEVEL;
3290 *ebx = (L1I_LINE_SIZE - 1) | \
3291 ((L1I_PARTITIONS - 1) << 12) | \
3292 ((L1I_ASSOCIATIVITY - 1) << 22);
3293 *ecx = L1I_SETS - 1;
3294 *edx = CPUID_4_NO_INVD_SHARING;
3295 break;
3296 case 2: /* L2 cache info */
3297 *eax |= CPUID_4_TYPE_UNIFIED | \
3298 CPUID_4_LEVEL(2) | \
3299 CPUID_4_SELF_INIT_LEVEL;
3300 if (cs->nr_threads > 1) {
3301 *eax |= (cs->nr_threads - 1) << 14;
3303 *ebx = (L2_LINE_SIZE - 1) | \
3304 ((L2_PARTITIONS - 1) << 12) | \
3305 ((L2_ASSOCIATIVITY - 1) << 22);
3306 *ecx = L2_SETS - 1;
3307 *edx = CPUID_4_NO_INVD_SHARING;
3308 break;
3309 case 3: /* L3 cache info */
3310 if (!cpu->enable_l3_cache) {
3311 *eax = 0;
3312 *ebx = 0;
3313 *ecx = 0;
3314 *edx = 0;
3315 break;
3317 *eax |= CPUID_4_TYPE_UNIFIED | \
3318 CPUID_4_LEVEL(3) | \
3319 CPUID_4_SELF_INIT_LEVEL;
3320 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3321 *eax |= ((1 << pkg_offset) - 1) << 14;
3322 *ebx = (L3_N_LINE_SIZE - 1) | \
3323 ((L3_N_PARTITIONS - 1) << 12) | \
3324 ((L3_N_ASSOCIATIVITY - 1) << 22);
3325 *ecx = L3_N_SETS - 1;
3326 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
3327 break;
3328 default: /* end of info */
3329 *eax = 0;
3330 *ebx = 0;
3331 *ecx = 0;
3332 *edx = 0;
3333 break;
3337 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3338 if ((*eax & 31) && cs->nr_cores > 1) {
3339 *eax |= (cs->nr_cores - 1) << 26;
3341 break;
3342 case 5:
3343 /* mwait info: needed for Core compatibility */
3344 *eax = 0; /* Smallest monitor-line size in bytes */
3345 *ebx = 0; /* Largest monitor-line size in bytes */
3346 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3347 *edx = 0;
3348 break;
3349 case 6:
3350 /* Thermal and Power Leaf */
3351 *eax = env->features[FEAT_6_EAX];
3352 *ebx = 0;
3353 *ecx = 0;
3354 *edx = 0;
3355 break;
3356 case 7:
3357 /* Structured Extended Feature Flags Enumeration Leaf */
3358 if (count == 0) {
3359 *eax = 0; /* Maximum ECX value for sub-leaves */
3360 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3361 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
3362 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3363 *ecx |= CPUID_7_0_ECX_OSPKE;
3365 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3366 } else {
3367 *eax = 0;
3368 *ebx = 0;
3369 *ecx = 0;
3370 *edx = 0;
3372 break;
3373 case 9:
3374 /* Direct Cache Access Information Leaf */
3375 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3376 *ebx = 0;
3377 *ecx = 0;
3378 *edx = 0;
3379 break;
3380 case 0xA:
3381 /* Architectural Performance Monitoring Leaf */
3382 if (kvm_enabled() && cpu->enable_pmu) {
3383 KVMState *s = cs->kvm_state;
3385 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
3386 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
3387 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
3388 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
3389 } else if (hvf_enabled() && cpu->enable_pmu) {
3390 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
3391 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
3392 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
3393 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
3394 } else {
3395 *eax = 0;
3396 *ebx = 0;
3397 *ecx = 0;
3398 *edx = 0;
3400 break;
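/* Worked example for leaf 0xB below, assuming -smp cores=2,threads=2:
 * subleaf 0 (SMT level) returns EAX=1 (bits to shift the x2APIC ID right
 * to reach the core level) and EBX=2 logical processors; subleaf 1 (core
 * level) returns EAX=2 and EBX=4.
 */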
3401 case 0xB:
3402 /* Extended Topology Enumeration Leaf */
3403 if (!cpu->enable_cpuid_0xb) {
3404 *eax = *ebx = *ecx = *edx = 0;
3405 break;
3408 *ecx = count & 0xff;
3409 *edx = cpu->apic_id;
3411 switch (count) {
3412 case 0:
3413 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
3414 *ebx = cs->nr_threads;
3415 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
3416 break;
3417 case 1:
3418 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3419 *ebx = cs->nr_cores * cs->nr_threads;
3420 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
3421 break;
3422 default:
3423 *eax = 0;
3424 *ebx = 0;
3425 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
3428 assert(!(*eax & ~0x1f));
3429 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
3430 break;
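/* Leaf 0xD below enumerates XSAVE state: subleaf 0 returns the supported
 * XCR0 component mask (EAX/EDX) and the required save-area size (EBX/ECX),
 * subleaf 1 returns the XSAVE extension flags (XSAVEOPT, XSAVEC, XGETBV1),
 * and subleaves >= 2 return the size and offset of each enabled component.
 */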
3431 case 0xD: {
3432 /* Processor Extended State */
3433 *eax = 0;
3434 *ebx = 0;
3435 *ecx = 0;
3436 *edx = 0;
3437 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3438 break;
3441 if (count == 0) {
3442 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
3443 *eax = env->features[FEAT_XSAVE_COMP_LO];
3444 *edx = env->features[FEAT_XSAVE_COMP_HI];
3445 *ebx = *ecx;
3446 } else if (count == 1) {
3447 *eax = env->features[FEAT_XSAVE];
3448 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
3449 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
3450 const ExtSaveArea *esa = &x86_ext_save_areas[count];
3451 *eax = esa->size;
3452 *ebx = esa->offset;
3455 break;
3457 case 0x40000000:
3458 /*
3459 * CPUID code in kvm_arch_init_vcpu() ignores stuff
3460 * set here, but we restrict to TCG none the less.
3461 */
3462 if (tcg_enabled() && cpu->expose_tcg) {
3463 memcpy(signature, "TCGTCGTCGTCG", 12);
3464 *eax = 0x40000001;
3465 *ebx = signature[0];
3466 *ecx = signature[1];
3467 *edx = signature[2];
3468 } else {
3469 *eax = 0;
3470 *ebx = 0;
3471 *ecx = 0;
3472 *edx = 0;
3474 break;
3475 case 0x40000001:
3476 *eax = 0;
3477 *ebx = 0;
3478 *ecx = 0;
3479 *edx = 0;
3480 break;
3481 case 0x80000000:
3482 *eax = env->cpuid_xlevel;
3483 *ebx = env->cpuid_vendor1;
3484 *edx = env->cpuid_vendor2;
3485 *ecx = env->cpuid_vendor3;
3486 break;
3487 case 0x80000001:
3488 *eax = env->cpuid_version;
3489 *ebx = 0;
3490 *ecx = env->features[FEAT_8000_0001_ECX];
3491 *edx = env->features[FEAT_8000_0001_EDX];
3493 /* The Linux kernel checks for the CMPLegacy bit and
3494 * discards multiple thread information if it is set.
3495 * So don't set it here for Intel to make Linux guests happy.
3496 */
3497 if (cs->nr_cores * cs->nr_threads > 1) {
3498 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
3499 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
3500 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
3501 *ecx |= 1 << 1; /* CmpLegacy bit */
3504 break;
3505 case 0x80000002:
3506 case 0x80000003:
3507 case 0x80000004:
3508 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
3509 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
3510 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
3511 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
3512 break;
3513 case 0x80000005:
3514 /* cache info (L1 cache) */
3515 if (cpu->cache_info_passthrough) {
3516 host_cpuid(index, 0, eax, ebx, ecx, edx);
3517 break;
3519 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3520 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3521 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3522 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3523 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
3524 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
3525 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
3526 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
3527 break;
3528 case 0x80000006:
3529 /* cache info (L2 cache) */
3530 if (cpu->cache_info_passthrough) {
3531 host_cpuid(index, 0, eax, ebx, ecx, edx);
3532 break;
3534 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3535 (L2_DTLB_2M_ENTRIES << 16) | \
3536 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3537 (L2_ITLB_2M_ENTRIES);
3538 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3539 (L2_DTLB_4K_ENTRIES << 16) | \
3540 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3541 (L2_ITLB_4K_ENTRIES);
3542 *ecx = (L2_SIZE_KB_AMD << 16) | \
3543 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
3544 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
3545 if (!cpu->enable_l3_cache) {
3546 *edx = ((L3_SIZE_KB / 512) << 18) | \
3547 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
3548 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
3549 } else {
3550 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
3551 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
3552 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
3554 break;
3555 case 0x80000007:
3556 *eax = 0;
3557 *ebx = 0;
3558 *ecx = 0;
3559 *edx = env->features[FEAT_8000_0007_EDX];
3560 break;
3561 case 0x80000008:
3562 /* virtual & phys address size in low 2 bytes. */
3563 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3564 /* 64 bit processor */
3565 *eax = cpu->phys_bits; /* configurable physical bits */
3566 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
3567 *eax |= 0x00003900; /* 57 bits virtual */
3568 } else {
3569 *eax |= 0x00003000; /* 48 bits virtual */
3571 } else {
3572 *eax = cpu->phys_bits;
3574 *ebx = env->features[FEAT_8000_0008_EBX];
3575 *ecx = 0;
3576 *edx = 0;
3577 if (cs->nr_cores * cs->nr_threads > 1) {
3578 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
3580 break;
3581 case 0x8000000A:
3582 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3583 *eax = 0x00000001; /* SVM Revision */
3584 *ebx = 0x00000010; /* nr of ASIDs */
3585 *ecx = 0;
3586 *edx = env->features[FEAT_SVM]; /* optional features */
3587 } else {
3588 *eax = 0;
3589 *ebx = 0;
3590 *ecx = 0;
3591 *edx = 0;
3593 break;
3594 case 0xC0000000:
3595 *eax = env->cpuid_xlevel2;
3596 *ebx = 0;
3597 *ecx = 0;
3598 *edx = 0;
3599 break;
3600 case 0xC0000001:
3601 /* Support for VIA CPU's CPUID instruction */
3602 *eax = env->cpuid_version;
3603 *ebx = 0;
3604 *ecx = 0;
3605 *edx = env->features[FEAT_C000_0001_EDX];
3606 break;
3607 case 0xC0000002:
3608 case 0xC0000003:
3609 case 0xC0000004:
3610 /* Reserved for future use; currently filled with zero */
3611 *eax = 0;
3612 *ebx = 0;
3613 *ecx = 0;
3614 *edx = 0;
3615 break;
3616 case 0x8000001F:
3617 *eax = sev_enabled() ? 0x2 : 0;
3618 *ebx = sev_get_cbit_position();
3619 *ebx |= sev_get_reduced_phys_bits() << 6;
3620 *ecx = 0;
3621 *edx = 0;
3622 break;
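/* For illustration only (a minimal sketch, not code from this file):
 * a guest probing the SEV leaf filled in above would check:
 *
 *     bool     sev_supported = (eax >> 1) & 1;     EAX[1]    - SEV available
 *     unsigned cbit_position = ebx & 0x3f;         EBX[5:0]  - C-bit location
 *     unsigned phys_reduced  = (ebx >> 6) & 0x3f;  EBX[11:6] - phys addr bit reduction
 *
 * e.g. a C-bit position of 47 means the guest marks a page encrypted by
 * setting bit 47 in its page-table entries, and its usable physical
 * address width shrinks by the reported reduction.
 */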
3623 default:
3624 /* reserved values: zero */
3625 *eax = 0;
3626 *ebx = 0;
3627 *ecx = 0;
3628 *edx = 0;
3629 break;
3633 /* CPUClass::reset() */
3634 static void x86_cpu_reset(CPUState *s)
3636 X86CPU *cpu = X86_CPU(s);
3637 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
3638 CPUX86State *env = &cpu->env;
3639 target_ulong cr4;
3640 uint64_t xcr0;
3641 int i;
3643 xcc->parent_reset(s);
3645 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
3647 env->old_exception = -1;
3649 /* init to reset state */
3651 env->hflags2 |= HF2_GIF_MASK;
3653 cpu_x86_update_cr0(env, 0x60000010);
3654 env->a20_mask = ~0x0;
3655 env->smbase = 0x30000;
3656 env->msr_smi_count = 0;
3658 env->idt.limit = 0xffff;
3659 env->gdt.limit = 0xffff;
3660 env->ldt.limit = 0xffff;
3661 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
3662 env->tr.limit = 0xffff;
3663 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
3665 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
3666 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
3667 DESC_R_MASK | DESC_A_MASK);
3668 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
3669 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3670 DESC_A_MASK);
3671 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
3672 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3673 DESC_A_MASK);
3674 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
3675 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3676 DESC_A_MASK);
3677 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
3678 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3679 DESC_A_MASK);
3680 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
3681 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3682 DESC_A_MASK);
3684 env->eip = 0xfff0;
3685 env->regs[R_EDX] = env->cpuid_version;
3687 env->eflags = 0x2;
3689 /* FPU init */
3690 for (i = 0; i < 8; i++) {
3691 env->fptags[i] = 1;
3693 cpu_set_fpuc(env, 0x37f);
3695 env->mxcsr = 0x1f80;
3696 /* All units are in INIT state. */
3697 env->xstate_bv = 0;
3699 env->pat = 0x0007040600070406ULL;
3700 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
3702 memset(env->dr, 0, sizeof(env->dr));
3703 env->dr[6] = DR6_FIXED_1;
3704 env->dr[7] = DR7_FIXED_1;
3705 cpu_breakpoint_remove_all(s, BP_CPU);
3706 cpu_watchpoint_remove_all(s, BP_CPU);
3708 cr4 = 0;
3709 xcr0 = XSTATE_FP_MASK;
3711 #ifdef CONFIG_USER_ONLY
3712 /* Enable all the features for user-mode. */
3713 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
3714 xcr0 |= XSTATE_SSE_MASK;
3716 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3717 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3718 if (env->features[esa->feature] & esa->bits) {
3719 xcr0 |= 1ull << i;
3723 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
3724 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
3726 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
3727 cr4 |= CR4_FSGSBASE_MASK;
3729 #endif
3731 env->xcr0 = xcr0;
3732 cpu_x86_update_cr4(env, cr4);
3735 * SDM 11.11.5 requires:
3736 * - IA32_MTRR_DEF_TYPE MSR.E = 0
3737 * - IA32_MTRR_PHYSMASKn.V = 0
3738 * All other bits are undefined. For simplification, zero it all.
3740 env->mtrr_deftype = 0;
3741 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
3742 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
3744 env->interrupt_injected = -1;
3745 env->exception_injected = -1;
3746 env->nmi_injected = false;
3747 #if !defined(CONFIG_USER_ONLY)
3748 /* We hard-wire the BSP to the first CPU. */
3749 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
3751 s->halted = !cpu_is_bsp(cpu);
3753 if (kvm_enabled()) {
3754 kvm_arch_reset_vcpu(cpu);
3756 else if (hvf_enabled()) {
3757 hvf_reset_vcpu(s);
3759 #endif
3762 #ifndef CONFIG_USER_ONLY
3763 bool cpu_is_bsp(X86CPU *cpu)
3765 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3768 /* TODO: remove me when reset over QOM tree is implemented */
3769 static void x86_cpu_machine_reset_cb(void *opaque)
3771 X86CPU *cpu = opaque;
3772 cpu_reset(CPU(cpu));
3774 #endif
3776 static void mce_init(X86CPU *cpu)
3778 CPUX86State *cenv = &cpu->env;
3779 unsigned int bank;
3781 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3782 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3783 (CPUID_MCE | CPUID_MCA)) {
3784 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3785 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3786 cenv->mcg_ctl = ~(uint64_t)0;
3787 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3788 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3793 #ifndef CONFIG_USER_ONLY
3794 APICCommonClass *apic_get_class(void)
3796 const char *apic_type = "apic";
3798 /* TODO: in-kernel irqchip for hvf */
3799 if (kvm_apic_in_kernel()) {
3800 apic_type = "kvm-apic";
3801 } else if (xen_enabled()) {
3802 apic_type = "xen-apic";
3805 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3808 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
3810 APICCommonState *apic;
3811 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
3813 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
3815 object_property_add_child(OBJECT(cpu), "lapic",
3816 OBJECT(cpu->apic_state), &error_abort);
3817 object_unref(OBJECT(cpu->apic_state));
3819 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
3820 /* TODO: convert to link<> */
3821 apic = APIC_COMMON(cpu->apic_state);
3822 apic->cpu = cpu;
3823 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
3826 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3828 APICCommonState *apic;
3829 static bool apic_mmio_map_once;
3831 if (cpu->apic_state == NULL) {
3832 return;
3834 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
3835 errp);
3837 /* Map APIC MMIO area */
3838 apic = APIC_COMMON(cpu->apic_state);
3839 if (!apic_mmio_map_once) {
3840 memory_region_add_subregion_overlap(get_system_memory(),
3841 apic->apicbase &
3842 MSR_IA32_APICBASE_BASE,
3843 &apic->io_memory,
3844 0x1000);
3845 apic_mmio_map_once = true;
3849 static void x86_cpu_machine_done(Notifier *n, void *unused)
3851 X86CPU *cpu = container_of(n, X86CPU, machine_done);
3852 MemoryRegion *smram =
3853 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
3855 if (smram) {
3856 cpu->smram = g_new(MemoryRegion, 1);
3857 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
3858 smram, 0, 1ull << 32);
3859 memory_region_set_enabled(cpu->smram, true);
3860 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
3863 #else
3864 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3867 #endif
3869 /* Note: Only safe for use on x86(-64) hosts */
3870 static uint32_t x86_host_phys_bits(void)
3872 uint32_t eax;
3873 uint32_t host_phys_bits;
3875 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3876 if (eax >= 0x80000008) {
3877 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3878 /* Note: According to AMD doc 25481 rev 2.34 they have a field
3879 * at 23:16 that can specify the maximum physical address bits for
3880 * the guest, which can override this value; but I've not seen
3881 * anything with that set.
3883 host_phys_bits = eax & 0xff;
3884 } else {
3885 /* It's an odd 64 bit machine that doesn't have the leaf for
3886 * physical address bits; fall back to 36, which matches most
3887 * older Intel parts.
3889 host_phys_bits = 36;
3892 return host_phys_bits;
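/* For illustration only (a minimal sketch, not code from this file):
 * the value returned here bounds the physical address space a guest can
 * safely be given, e.g.:
 *
 *     uint64_t limit = 1ULL << x86_host_phys_bits();
 *
 * 36 bits -> 64 GiB, 40 bits -> 1 TiB, 48 bits -> 256 TiB; advertising
 * more bits than the host provides can cause incorrect guest behaviour,
 * as noted in the realize code below.
 */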
3895 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3897 if (*min < value) {
3898 *min = value;
3902 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3903 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3905 CPUX86State *env = &cpu->env;
3906 FeatureWordInfo *fi = &feature_word_info[w];
3907 uint32_t eax = fi->cpuid_eax;
3908 uint32_t region = eax & 0xF0000000;
3910 if (!env->features[w]) {
3911 return;
3914 switch (region) {
3915 case 0x00000000:
3916 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3917 break;
3918 case 0x80000000:
3919 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3920 break;
3921 case 0xC0000000:
3922 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3923 break;
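/* For illustration only (a minimal sketch, not code from this file):
 * each feature word carries the CPUID leaf it lives in, and the leaf's
 * top nibble selects which minimum level gets bumped.  For example,
 * assuming env->features[FEAT_8000_0008_EBX] is non-zero:
 *
 *     x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
 *     now env->cpuid_min_xlevel >= 0x80000008
 *
 * because that word's cpuid_eax is 0x80000008 and its region is
 * 0x80000000.
 */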
3927 /* Calculate XSAVE components based on the configured CPU feature flags */
3928 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3930 CPUX86State *env = &cpu->env;
3931 int i;
3932 uint64_t mask;
3934 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3935 return;
3938 mask = 0;
3939 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3940 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3941 if (env->features[esa->feature] & esa->bits) {
3942 mask |= (1ULL << i);
3946 env->features[FEAT_XSAVE_COMP_LO] = mask;
3947 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
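/* For illustration only (a worked example, not code from this file):
 * with the architectural XSAVE component numbering (bit 0 x87, bit 1
 * SSE, bit 2 AVX), a CPU that enables all three ends up with
 * mask == 0x7, split across the two 32-bit feature words:
 *
 *     env->features[FEAT_XSAVE_COMP_LO] == 0x00000007
 *     env->features[FEAT_XSAVE_COMP_HI] == 0x00000000
 *
 * Components above bit 31 would land in FEAT_XSAVE_COMP_HI instead.
 */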
3950 /***** Steps involved in loading and filtering CPUID data
3952 * When initializing and realizing a CPU object, the steps
3953 * involved in setting up CPUID data are:
3955 * 1) Loading CPU model definition (X86CPUDefinition). This is
3956 * implemented by x86_cpu_load_def() and should be completely
3957 * transparent, as it is done automatically by instance_init.
3958 * No code should need to look at X86CPUDefinition structs
3959 * outside instance_init.
3961 * 2) CPU expansion. This is done by realize before CPUID
3962 * filtering, and will make sure host/accelerator data is
3963 * loaded for CPU models that depend on host capabilities
3964 * (e.g. "host"). Done by x86_cpu_expand_features().
3966 * 3) CPUID filtering. This initializes extra data related to
3967 * CPUID, and checks if the host supports all capabilities
3968 * required by the CPU. Runnability of a CPU model is
3969 * determined at this step. Done by x86_cpu_filter_features().
3971 * Some operations don't require all steps to be performed.
3972 * More precisely:
3974 * - CPU instance creation (instance_init) will run only CPU
3975 * model loading. CPU expansion can't run at instance_init-time
3976 * because host/accelerator data may not be available yet.
3977 * - CPU realization will perform both CPU model expansion and CPUID
3978 * filtering, and return an error in case one of them fails.
3979 * - query-cpu-definitions needs to run all 3 steps. It needs
3980 * to run CPUID filtering, as the 'unavailable-features'
3981 * field is set based on the filtering results.
3982 * - The query-cpu-model-expansion QMP command only needs to run
3983 * CPU model loading and CPU expansion. It should not filter
3984 * any CPUID data based on host capabilities.
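/* For illustration only (a minimal sketch, not code from this file):
 * at realize time the steps above run in this order, as done in
 * x86_cpu_realizefn() further down:
 *
 *     x86_cpu_expand_features(cpu, &err);       step 2: expansion
 *     if (x86_cpu_filter_features(cpu)) {       step 3: filtering
 *         some requested features were dropped
 *     }
 *
 * query-cpu-model-expansion stops after step 2, so its output is not
 * narrowed by host capabilities.
 */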
3987 /* Expand CPU configuration data, based on configured features
3988 * and host/accelerator capabilities when appropriate.
3990 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
3992 CPUX86State *env = &cpu->env;
3993 FeatureWord w;
3994 GList *l;
3995 Error *local_err = NULL;
3997 /* TODO: Now that cpu->max_features doesn't overwrite features
3998 * set using QOM properties, we can convert
3999 * plus_features & minus_features to global properties
4000 * inside x86_cpu_parse_featurestr() too.
4002 if (cpu->max_features) {
4003 for (w = 0; w < FEATURE_WORDS; w++) {
4004 /* Override only features that weren't set explicitly
4005 * by the user.
4007 env->features[w] |=
4008 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4009 ~env->user_features[w];
4013 for (l = plus_features; l; l = l->next) {
4014 const char *prop = l->data;
4015 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4016 if (local_err) {
4017 goto out;
4021 for (l = minus_features; l; l = l->next) {
4022 const char *prop = l->data;
4023 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4024 if (local_err) {
4025 goto out;
4029 if (!kvm_enabled() || !cpu->expose_kvm) {
4030 env->features[FEAT_KVM] = 0;
4033 x86_cpu_enable_xsave_components(cpu);
4035 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
4036 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4037 if (cpu->full_cpuid_auto_level) {
4038 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4039 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4040 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4041 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4042 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4043 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4044 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4045 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4046 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4047 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4048 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4049 /* SVM requires CPUID[0x8000000A] */
4050 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4051 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4054 /* SEV requires CPUID[0x8000001F] */
4055 if (sev_enabled()) {
4056 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4060 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4061 if (env->cpuid_level == UINT32_MAX) {
4062 env->cpuid_level = env->cpuid_min_level;
4064 if (env->cpuid_xlevel == UINT32_MAX) {
4065 env->cpuid_xlevel = env->cpuid_min_xlevel;
4067 if (env->cpuid_xlevel2 == UINT32_MAX) {
4068 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4071 out:
4072 if (local_err != NULL) {
4073 error_propagate(errp, local_err);
4078 * Finishes initialization of CPUID data, filters CPU feature
4079 * words based on host availability of each feature.
4081 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4083 static int x86_cpu_filter_features(X86CPU *cpu)
4085 CPUX86State *env = &cpu->env;
4086 FeatureWord w;
4087 int rv = 0;
4089 for (w = 0; w < FEATURE_WORDS; w++) {
4090 uint32_t host_feat =
4091 x86_cpu_get_supported_feature_word(w, false);
4092 uint32_t requested_features = env->features[w];
4093 env->features[w] &= host_feat;
4094 cpu->filtered_features[w] = requested_features & ~env->features[w];
4095 if (cpu->filtered_features[w]) {
4096 rv = 1;
4100 return rv;
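/* For illustration only (a worked example, not code from this file):
 * suppose for some word the user requested bits 0b1011 but the host
 * only offers 0b0011.  After the loop above:
 *
 *     env->features[w]          == 0b0011   (requested & host)
 *     cpu->filtered_features[w] == 0b1000   (requested but unavailable)
 *     return value              == 1
 *
 * With "enforce" this non-zero result becomes a realize error; with
 * "check" it is only reported (see x86_cpu_realizefn() below).
 */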
4103 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4104 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4105 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4106 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4107 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4108 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4109 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4111 CPUState *cs = CPU(dev);
4112 X86CPU *cpu = X86_CPU(dev);
4113 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4114 CPUX86State *env = &cpu->env;
4115 Error *local_err = NULL;
4116 static bool ht_warned;
4118 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4119 char *name = x86_cpu_class_get_model_name(xcc);
4120 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4121 g_free(name);
4122 goto out;
4125 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4126 error_setg(errp, "apic-id property was not initialized properly");
4127 return;
4130 x86_cpu_expand_features(cpu, &local_err);
4131 if (local_err) {
4132 goto out;
4135 if (x86_cpu_filter_features(cpu) &&
4136 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4137 x86_cpu_report_filtered_features(cpu);
4138 if (cpu->enforce_cpuid) {
4139 error_setg(&local_err,
4140 accel_uses_host_cpuid() ?
4141 "Host doesn't support requested features" :
4142 "TCG doesn't support requested features");
4143 goto out;
4147 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4148 * CPUID[1].EDX.
4150 if (IS_AMD_CPU(env)) {
4151 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4152 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4153 & CPUID_EXT2_AMD_ALIASES);
4156 /* For 64-bit systems, think about the number of physical bits to present.
4157 * Ideally this should be the same as the host; anything other than matching
4158 * the host can cause incorrect guest behaviour.
4159 * QEMU used to pick the magic value of 40 bits, which corresponds to
4160 * consumer AMD devices but nothing else.
4162 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4163 if (accel_uses_host_cpuid()) {
4164 uint32_t host_phys_bits = x86_host_phys_bits();
4165 static bool warned;
4167 if (cpu->host_phys_bits) {
4168 /* The user asked for us to use the host physical bits */
4169 cpu->phys_bits = host_phys_bits;
4172 /* Print a warning if the user set it to a value that's not the
4173 * host value.
4175 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4176 !warned) {
4177 warn_report("Host physical bits (%u)"
4178 " does not match phys-bits property (%u)",
4179 host_phys_bits, cpu->phys_bits);
4180 warned = true;
4183 if (cpu->phys_bits &&
4184 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4185 cpu->phys_bits < 32)) {
4186 error_setg(errp, "phys-bits should be between 32 and %u "
4187 " (but is %u)",
4188 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4189 return;
4191 } else {
4192 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4193 error_setg(errp, "TCG only supports phys-bits=%u",
4194 TCG_PHYS_ADDR_BITS);
4195 return;
4198 /* 0 means it was not explicitly set by the user (or by machine
4199 * compat_props or by the host code above). In this case, the default
4200 * is the value used by TCG (40).
4202 if (cpu->phys_bits == 0) {
4203 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4205 } else {
4206 /* For 32-bit systems, don't use the user-set value, but keep
4207 * phys_bits consistent with what we tell the guest.
4209 if (cpu->phys_bits != 0) {
4210 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4211 return;
4214 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4215 cpu->phys_bits = 36;
4216 } else {
4217 cpu->phys_bits = 32;
4220 cpu_exec_realizefn(cs, &local_err);
4221 if (local_err != NULL) {
4222 error_propagate(errp, local_err);
4223 return;
4226 #ifndef CONFIG_USER_ONLY
4227 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4229 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4230 x86_cpu_apic_create(cpu, &local_err);
4231 if (local_err != NULL) {
4232 goto out;
4235 #endif
4237 mce_init(cpu);
4239 #ifndef CONFIG_USER_ONLY
4240 if (tcg_enabled()) {
4241 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4242 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4244 /* Outer container... */
4245 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4246 memory_region_set_enabled(cpu->cpu_as_root, true);
4248 /* ... with two regions inside: normal system memory with low
4249 * priority, and...
4251 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4252 get_system_memory(), 0, ~0ull);
4253 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4254 memory_region_set_enabled(cpu->cpu_as_mem, true);
4256 cs->num_ases = 2;
4257 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4258 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4260 /* ... SMRAM with higher priority, linked from /machine/smram. */
4261 cpu->machine_done.notify = x86_cpu_machine_done;
4262 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4264 #endif
4266 qemu_init_vcpu(cs);
4268 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
4269 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
4270 * based on inputs (sockets, cores, threads), it is still better to give
4271 * users a warning.
4273 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
4274 * cs->nr_threads hasn't been populated yet and the check is incorrect.
4276 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
4277 error_report("AMD CPU doesn't support hyperthreading. Please configure"
4278 " -smp options properly.");
4279 ht_warned = true;
4282 x86_cpu_apic_realize(cpu, &local_err);
4283 if (local_err != NULL) {
4284 goto out;
4286 cpu_reset(cs);
4288 xcc->parent_realize(dev, &local_err);
4290 out:
4291 if (local_err != NULL) {
4292 error_propagate(errp, local_err);
4293 return;
4297 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4299 X86CPU *cpu = X86_CPU(dev);
4300 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4301 Error *local_err = NULL;
4303 #ifndef CONFIG_USER_ONLY
4304 cpu_remove_sync(CPU(dev));
4305 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
4306 #endif
4308 if (cpu->apic_state) {
4309 object_unparent(OBJECT(cpu->apic_state));
4310 cpu->apic_state = NULL;
4313 xcc->parent_unrealize(dev, &local_err);
4314 if (local_err != NULL) {
4315 error_propagate(errp, local_err);
4316 return;
4320 typedef struct BitProperty {
4321 FeatureWord w;
4322 uint32_t mask;
4323 } BitProperty;
4325 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
4326 void *opaque, Error **errp)
4328 X86CPU *cpu = X86_CPU(obj);
4329 BitProperty *fp = opaque;
4330 uint32_t f = cpu->env.features[fp->w];
4331 bool value = (f & fp->mask) == fp->mask;
4332 visit_type_bool(v, name, &value, errp);
4335 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
4336 void *opaque, Error **errp)
4338 DeviceState *dev = DEVICE(obj);
4339 X86CPU *cpu = X86_CPU(obj);
4340 BitProperty *fp = opaque;
4341 Error *local_err = NULL;
4342 bool value;
4344 if (dev->realized) {
4345 qdev_prop_set_after_realize(dev, name, errp);
4346 return;
4349 visit_type_bool(v, name, &value, &local_err);
4350 if (local_err) {
4351 error_propagate(errp, local_err);
4352 return;
4355 if (value) {
4356 cpu->env.features[fp->w] |= fp->mask;
4357 } else {
4358 cpu->env.features[fp->w] &= ~fp->mask;
4360 cpu->env.user_features[fp->w] |= fp->mask;
4363 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
4364 void *opaque)
4366 BitProperty *prop = opaque;
4367 g_free(prop);
4370 /* Register a boolean property to get/set a single bit in a uint32_t field.
4372 * The same property name can be registered multiple times to make it affect
4373 * multiple bits in the same FeatureWord. In that case, the getter will return
4374 * true only if all bits are set.
4376 static void x86_cpu_register_bit_prop(X86CPU *cpu,
4377 const char *prop_name,
4378 FeatureWord w,
4379 int bitnr)
4381 BitProperty *fp;
4382 ObjectProperty *op;
4383 uint32_t mask = (1UL << bitnr);
4385 op = object_property_find(OBJECT(cpu), prop_name, NULL);
4386 if (op) {
4387 fp = op->opaque;
4388 assert(fp->w == w);
4389 fp->mask |= mask;
4390 } else {
4391 fp = g_new0(BitProperty, 1);
4392 fp->w = w;
4393 fp->mask = mask;
4394 object_property_add(OBJECT(cpu), prop_name, "bool",
4395 x86_cpu_get_bit_prop,
4396 x86_cpu_set_bit_prop,
4397 x86_cpu_release_bit_prop, fp, &error_abort);
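/* For illustration only (a minimal sketch, not code from this file):
 * registering the same name twice just widens the mask.  With a
 * hypothetical property "foo" registered for bits 3 and 5 of one word,
 * fp->mask becomes 0x28, the getter reports true only when both bits
 * are set ((f & 0x28) == 0x28), and the setter sets or clears both
 * bits together.
 */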
4401 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
4402 FeatureWord w,
4403 int bitnr)
4405 FeatureWordInfo *fi = &feature_word_info[w];
4406 const char *name = fi->feat_names[bitnr];
4408 if (!name) {
4409 return;
4412 /* Property names should use "-" instead of "_".
4413 * Old names containing underscores are registered as aliases
4414 * using object_property_add_alias()
4416 assert(!strchr(name, '_'));
4417 /* aliases don't use "|" delimiters anymore; they are registered
4418 * manually using object_property_add_alias() */
4419 assert(!strchr(name, '|'));
4420 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
4423 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
4425 X86CPU *cpu = X86_CPU(cs);
4426 CPUX86State *env = &cpu->env;
4427 GuestPanicInformation *panic_info = NULL;
4429 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
4430 panic_info = g_malloc0(sizeof(GuestPanicInformation));
4432 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
4434 assert(HV_CRASH_PARAMS >= 5);
4435 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
4436 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
4437 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
4438 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
4439 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
4442 return panic_info;
4444 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
4445 const char *name, void *opaque,
4446 Error **errp)
4448 CPUState *cs = CPU(obj);
4449 GuestPanicInformation *panic_info;
4451 if (!cs->crash_occurred) {
4452 error_setg(errp, "No crash occurred");
4453 return;
4456 panic_info = x86_cpu_get_crash_info(cs);
4457 if (panic_info == NULL) {
4458 error_setg(errp, "No crash information");
4459 return;
4462 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
4463 errp);
4464 qapi_free_GuestPanicInformation(panic_info);
4467 static void x86_cpu_initfn(Object *obj)
4469 CPUState *cs = CPU(obj);
4470 X86CPU *cpu = X86_CPU(obj);
4471 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
4472 CPUX86State *env = &cpu->env;
4473 FeatureWord w;
4475 cs->env_ptr = env;
4477 object_property_add(obj, "family", "int",
4478 x86_cpuid_version_get_family,
4479 x86_cpuid_version_set_family, NULL, NULL, NULL);
4480 object_property_add(obj, "model", "int",
4481 x86_cpuid_version_get_model,
4482 x86_cpuid_version_set_model, NULL, NULL, NULL);
4483 object_property_add(obj, "stepping", "int",
4484 x86_cpuid_version_get_stepping,
4485 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
4486 object_property_add_str(obj, "vendor",
4487 x86_cpuid_get_vendor,
4488 x86_cpuid_set_vendor, NULL);
4489 object_property_add_str(obj, "model-id",
4490 x86_cpuid_get_model_id,
4491 x86_cpuid_set_model_id, NULL);
4492 object_property_add(obj, "tsc-frequency", "int",
4493 x86_cpuid_get_tsc_freq,
4494 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
4495 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
4496 x86_cpu_get_feature_words,
4497 NULL, NULL, (void *)env->features, NULL);
4498 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
4499 x86_cpu_get_feature_words,
4500 NULL, NULL, (void *)cpu->filtered_features, NULL);
4502 object_property_add(obj, "crash-information", "GuestPanicInformation",
4503 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
4505 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
4507 for (w = 0; w < FEATURE_WORDS; w++) {
4508 int bitnr;
4510 for (bitnr = 0; bitnr < 32; bitnr++) {
4511 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
4515 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
4516 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
4517 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
4518 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
4519 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
4520 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
4521 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
4523 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
4524 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
4525 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
4526 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
4527 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
4528 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
4529 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
4530 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
4531 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
4532 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
4533 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
4534 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
4535 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
4536 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
4537 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
4538 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
4539 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
4540 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
4541 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
4542 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
4543 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
4545 if (xcc->cpu_def) {
4546 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
4550 static int64_t x86_cpu_get_arch_id(CPUState *cs)
4552 X86CPU *cpu = X86_CPU(cs);
4554 return cpu->apic_id;
4557 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
4559 X86CPU *cpu = X86_CPU(cs);
4561 return cpu->env.cr[0] & CR0_PG_MASK;
4564 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
4566 X86CPU *cpu = X86_CPU(cs);
4568 cpu->env.eip = value;
4571 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
4573 X86CPU *cpu = X86_CPU(cs);
4575 cpu->env.eip = tb->pc - tb->cs_base;
4578 static bool x86_cpu_has_work(CPUState *cs)
4580 X86CPU *cpu = X86_CPU(cs);
4581 CPUX86State *env = &cpu->env;
4583 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
4584 CPU_INTERRUPT_POLL)) &&
4585 (env->eflags & IF_MASK)) ||
4586 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
4587 CPU_INTERRUPT_INIT |
4588 CPU_INTERRUPT_SIPI |
4589 CPU_INTERRUPT_MCE)) ||
4590 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
4591 !(env->hflags & HF_SMM_MASK));
4594 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
4596 X86CPU *cpu = X86_CPU(cs);
4597 CPUX86State *env = &cpu->env;
4599 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
4600 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
4601 : bfd_mach_i386_i8086);
4602 info->print_insn = print_insn_i386;
4604 info->cap_arch = CS_ARCH_X86;
4605 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
4606 : env->hflags & HF_CS32_MASK ? CS_MODE_32
4607 : CS_MODE_16);
4608 info->cap_insn_unit = 1;
4609 info->cap_insn_split = 8;
4612 void x86_update_hflags(CPUX86State *env)
4614 uint32_t hflags;
4615 #define HFLAG_COPY_MASK \
4616 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
4617 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
4618 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
4619 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
4621 hflags = env->hflags & HFLAG_COPY_MASK;
4622 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
4623 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
4624 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
4625 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
4626 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
4628 if (env->cr[4] & CR4_OSFXSR_MASK) {
4629 hflags |= HF_OSFXSR_MASK;
4632 if (env->efer & MSR_EFER_LMA) {
4633 hflags |= HF_LMA_MASK;
4636 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
4637 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
4638 } else {
4639 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
4640 (DESC_B_SHIFT - HF_CS32_SHIFT);
4641 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
4642 (DESC_B_SHIFT - HF_SS32_SHIFT);
4643 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
4644 !(hflags & HF_CS32_MASK)) {
4645 hflags |= HF_ADDSEG_MASK;
4646 } else {
4647 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
4648 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
4651 env->hflags = hflags;
4654 static Property x86_cpu_properties[] = {
4655 #ifdef CONFIG_USER_ONLY
4656 /* apic_id = 0 by default for *-user, see commit 9886e834 */
4657 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
4658 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
4659 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
4660 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
4661 #else
4662 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
4663 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
4664 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
4665 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
4666 #endif
4667 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
4668 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
4669 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
4670 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
4671 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
4672 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
4673 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
4674 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
4675 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
4676 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
4677 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
4678 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
4679 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
4680 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
4681 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
4682 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
4683 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
4684 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
4685 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
4686 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
4687 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
4688 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
4689 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
4690 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
4691 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
4692 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
4693 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
4694 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
4695 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
4696 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
4697 false),
4698 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
4699 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
4702 * From "Requirements for Implementing the Microsoft
4703 * Hypervisor Interface":
4704 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
4706 * "Starting with Windows Server 2012 and Windows 8, if
4707 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
4708 * the hypervisor imposes no specific limit to the number of VPs.
4709 * In this case, Windows Server 2012 guest VMs may use more than
4710 * 64 VPs, up to the maximum supported number of processors applicable
4711 * to the specific Windows version being used."
4713 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
4714 DEFINE_PROP_END_OF_LIST()
4717 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
4719 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4720 CPUClass *cc = CPU_CLASS(oc);
4721 DeviceClass *dc = DEVICE_CLASS(oc);
4723 device_class_set_parent_realize(dc, x86_cpu_realizefn,
4724 &xcc->parent_realize);
4725 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
4726 &xcc->parent_unrealize);
4727 dc->props = x86_cpu_properties;
4729 xcc->parent_reset = cc->reset;
4730 cc->reset = x86_cpu_reset;
4731 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
4733 cc->class_by_name = x86_cpu_class_by_name;
4734 cc->parse_features = x86_cpu_parse_featurestr;
4735 cc->has_work = x86_cpu_has_work;
4736 #ifdef CONFIG_TCG
4737 cc->do_interrupt = x86_cpu_do_interrupt;
4738 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
4739 #endif
4740 cc->dump_state = x86_cpu_dump_state;
4741 cc->get_crash_info = x86_cpu_get_crash_info;
4742 cc->set_pc = x86_cpu_set_pc;
4743 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
4744 cc->gdb_read_register = x86_cpu_gdb_read_register;
4745 cc->gdb_write_register = x86_cpu_gdb_write_register;
4746 cc->get_arch_id = x86_cpu_get_arch_id;
4747 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
4748 #ifdef CONFIG_USER_ONLY
4749 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
4750 #else
4751 cc->asidx_from_attrs = x86_asidx_from_attrs;
4752 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
4753 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
4754 cc->write_elf64_note = x86_cpu_write_elf64_note;
4755 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
4756 cc->write_elf32_note = x86_cpu_write_elf32_note;
4757 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
4758 cc->vmsd = &vmstate_x86_cpu;
4759 #endif
4760 cc->gdb_arch_name = x86_gdb_arch_name;
4761 #ifdef TARGET_X86_64
4762 cc->gdb_core_xml_file = "i386-64bit.xml";
4763 cc->gdb_num_core_regs = 57;
4764 #else
4765 cc->gdb_core_xml_file = "i386-32bit.xml";
4766 cc->gdb_num_core_regs = 41;
4767 #endif
4768 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
4769 cc->debug_excp_handler = breakpoint_handler;
4770 #endif
4771 cc->cpu_exec_enter = x86_cpu_exec_enter;
4772 cc->cpu_exec_exit = x86_cpu_exec_exit;
4773 #ifdef CONFIG_TCG
4774 cc->tcg_initialize = tcg_x86_init;
4775 #endif
4776 cc->disas_set_info = x86_disas_set_info;
4778 dc->user_creatable = true;
4781 static const TypeInfo x86_cpu_type_info = {
4782 .name = TYPE_X86_CPU,
4783 .parent = TYPE_CPU,
4784 .instance_size = sizeof(X86CPU),
4785 .instance_init = x86_cpu_initfn,
4786 .abstract = true,
4787 .class_size = sizeof(X86CPUClass),
4788 .class_init = x86_cpu_common_class_init,
4792 /* "base" CPU model, used by query-cpu-model-expansion */
4793 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4795 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4797 xcc->static_model = true;
4798 xcc->migration_safe = true;
4799 xcc->model_description = "base CPU model type with no features enabled";
4800 xcc->ordering = 8;
4803 static const TypeInfo x86_base_cpu_type_info = {
4804 .name = X86_CPU_TYPE_NAME("base"),
4805 .parent = TYPE_X86_CPU,
4806 .class_init = x86_cpu_base_class_init,
4809 static void x86_cpu_register_types(void)
4811 int i;
4813 type_register_static(&x86_cpu_type_info);
4814 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
4815 x86_register_cpudef_type(&builtin_x86_defs[i]);
4817 type_register_static(&max_x86_cpu_type_info);
4818 type_register_static(&x86_base_cpu_type_info);
4819 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
4820 type_register_static(&host_x86_cpu_type_info);
4821 #endif
4824 type_init(x86_cpu_register_types)