target-i386: add Skylake-Client cpu model
[qemu/ar7.git] / target-i386 / cpu.c
blob9c5aabcb76a0d33d05a54e4a2b978c18f4ecfadf
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #ifndef CONFIG_USER_ONLY
45 #include "exec/address-spaces.h"
46 #include "hw/hw.h"
47 #include "hw/xen/xen.h"
48 #include "hw/i386/apic_internal.h"
49 #endif
52 /* Cache topology CPUID constants: */
54 /* CPUID Leaf 2 Descriptors */
56 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
57 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
58 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
61 /* CPUID Leaf 4 constants: */
63 /* EAX: */
64 #define CPUID_4_TYPE_DCACHE 1
65 #define CPUID_4_TYPE_ICACHE 2
66 #define CPUID_4_TYPE_UNIFIED 3
68 #define CPUID_4_LEVEL(l) ((l) << 5)
70 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
71 #define CPUID_4_FULLY_ASSOC (1 << 9)
73 /* EDX: */
74 #define CPUID_4_NO_INVD_SHARING (1 << 0)
75 #define CPUID_4_INCLUSIVE (1 << 1)
76 #define CPUID_4_COMPLEX_IDX (1 << 2)
/* Sentinel meaning "fully associative" in AMD's associativity encoding */
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a ways-of-associativity count to the 4-bit field value.
 * Every use of the argument is parenthesized so the macro expands
 * correctly for compound expressions (e.g. AMD_ENC_ASSOC(x + y)).
 */
#define AMD_ENC_ASSOC(a) ((a) <= 1    ? (a)  : \
                          (a) == 2    ? 0x2  : \
                          (a) == 4    ? 0x4  : \
                          (a) == 8    ? 0x6  : \
                          (a) == 16   ? 0x8  : \
                          (a) == 32   ? 0xA  : \
                          (a) == 48   ? 0xB  : \
                          (a) == 64   ? 0xC  : \
                          (a) == 96   ? 0xD  : \
                          (a) == 128  ? 0xE  : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
95 /* Definitions of the hardcoded cache entries we expose: */
97 /* L1 data cache: */
98 #define L1D_LINE_SIZE 64
99 #define L1D_ASSOCIATIVITY 8
100 #define L1D_SETS 64
101 #define L1D_PARTITIONS 1
102 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
103 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
104 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
105 #define L1D_LINES_PER_TAG 1
106 #define L1D_SIZE_KB_AMD 64
107 #define L1D_ASSOCIATIVITY_AMD 2
109 /* L1 instruction cache: */
110 #define L1I_LINE_SIZE 64
111 #define L1I_ASSOCIATIVITY 8
112 #define L1I_SETS 64
113 #define L1I_PARTITIONS 1
114 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
115 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
116 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
117 #define L1I_LINES_PER_TAG 1
118 #define L1I_SIZE_KB_AMD 64
119 #define L1I_ASSOCIATIVITY_AMD 2
121 /* Level 2 unified cache: */
122 #define L2_LINE_SIZE 64
123 #define L2_ASSOCIATIVITY 16
124 #define L2_SETS 4096
125 #define L2_PARTITIONS 1
126 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
127 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
128 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
129 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
130 #define L2_LINES_PER_TAG 1
131 #define L2_SIZE_KB_AMD 512
133 /* No L3 cache: */
134 #define L3_SIZE_KB 0 /* disabled */
135 #define L3_ASSOCIATIVITY 0 /* disabled */
136 #define L3_LINES_PER_TAG 0 /* disabled */
137 #define L3_LINE_SIZE 0 /* disabled */
139 /* TLB definitions: */
141 #define L1_DTLB_2M_ASSOC 1
142 #define L1_DTLB_2M_ENTRIES 255
143 #define L1_DTLB_4K_ASSOC 1
144 #define L1_DTLB_4K_ENTRIES 255
146 #define L1_ITLB_2M_ASSOC 1
147 #define L1_ITLB_2M_ENTRIES 255
148 #define L1_ITLB_4K_ASSOC 1
149 #define L1_ITLB_4K_ENTRIES 255
151 #define L2_DTLB_2M_ASSOC 0 /* disabled */
152 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
153 #define L2_DTLB_4K_ASSOC 4
154 #define L2_DTLB_4K_ENTRIES 512
156 #define L2_ITLB_2M_ASSOC 0 /* disabled */
157 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
158 #define L2_ITLB_4K_ASSOC 4
159 #define L2_ITLB_4K_ENTRIES 512
163 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
164 uint32_t vendor2, uint32_t vendor3)
166 int i;
167 for (i = 0; i < 4; i++) {
168 dst[i] = vendor1 >> (8 * i);
169 dst[i + 4] = vendor2 >> (8 * i);
170 dst[i + 8] = vendor3 >> (8 * i);
172 dst[CPUID_VENDOR_SZ] = '\0';
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
static const char *feature_name[] = {
    /* Names for CPUID[1].EDX bits 0..31; NULL = bit has no name yet */
    "fpu", "vme", "de", "pse",                                  /* 0..3 */
    "tsc", "msr", "pae", "mce",                                 /* 4..7 */
    "cx8", "apic", NULL, "sep",                                 /* 8..11 */
    "mtrr", "pge", "mca", "cmov",                               /* 12..15 */
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",                  /* 20..23 */
    "fxsr", "sse", "sse2", "ss",                                /* 24..27 */
    "ht" /* Intel htt */, "tm", "ia64", "pbe",                  /* 28..31 */
};
/* Names for CPUID[1].ECX bits 0..31 ('|' separates accepted aliases) */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};
/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};
/* Names for CPUID[8000_0001].ECX bits 0..31 */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Names for CPUID[C000_0001].EDX bits 0..31 (VIA/Centaur features) */
static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Names for KVM paravirtual feature CPUID leaf bits.
 * Note: "kvmclock" appears twice on purpose (two clocksource feature bits). */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Names for CPUID[8000_000A].EDX bits (AMD SVM features) */
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Names for CPUID[EAX=7,ECX=0].EBX bits 0..31 */
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
    "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
    "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};
/* Names for CPUID[EAX=7,ECX=0].ECX bits 0..31 */
static const char *cpuid_7_0_ecx_feature_name[] = {
    NULL, NULL, NULL, "pku",
    "ospke", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Names for CPUID[8000_0007].EDX bits (Advanced Power Management) */
static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Names for CPUID[EAX=0xD,ECX=1].EAX bits (XSAVE sub-features) */
static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Names for CPUID[6].EAX bits (thermal/power management) */
static const char *cpuid_6_feature_name[] = {
    NULL, NULL, "arat", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
309 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
310 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
311 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
312 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
313 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
314 CPUID_PSE36 | CPUID_FXSR)
315 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
316 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
317 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
318 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
319 CPUID_PAE | CPUID_SEP | CPUID_APIC)
321 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
322 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
323 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
324 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
325 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
326 /* partly implemented:
327 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
328 /* missing:
329 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
330 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
331 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
332 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
333 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
334 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
335 /* missing:
336 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
337 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
338 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
339 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
340 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
342 #ifdef TARGET_X86_64
343 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
344 #else
345 #define TCG_EXT2_X86_64_FEATURES 0
346 #endif
348 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
349 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
350 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
351 TCG_EXT2_X86_64_FEATURES)
352 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
353 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
354 #define TCG_EXT4_FEATURES 0
355 #define TCG_SVM_FEATURES 0
356 #define TCG_KVM_FEATURES 0
357 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
358 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
359 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
360 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
361 /* missing:
362 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
363 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
364 CPUID_7_0_EBX_RDSEED */
365 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
366 #define TCG_APM_FEATURES 0
367 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
368 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
369 /* missing:
370 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Per-feature-word metadata: where a feature word is read from via the
 * CPUID instruction and the names of its individual bits. */
typedef struct FeatureWordInfo {
    const char **feat_names;  /* 32-entry name table, one per bit (NULL = unnamed) */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
382 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
383 [FEAT_1_EDX] = {
384 .feat_names = feature_name,
385 .cpuid_eax = 1, .cpuid_reg = R_EDX,
386 .tcg_features = TCG_FEATURES,
388 [FEAT_1_ECX] = {
389 .feat_names = ext_feature_name,
390 .cpuid_eax = 1, .cpuid_reg = R_ECX,
391 .tcg_features = TCG_EXT_FEATURES,
393 [FEAT_8000_0001_EDX] = {
394 .feat_names = ext2_feature_name,
395 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
396 .tcg_features = TCG_EXT2_FEATURES,
398 [FEAT_8000_0001_ECX] = {
399 .feat_names = ext3_feature_name,
400 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
401 .tcg_features = TCG_EXT3_FEATURES,
403 [FEAT_C000_0001_EDX] = {
404 .feat_names = ext4_feature_name,
405 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
406 .tcg_features = TCG_EXT4_FEATURES,
408 [FEAT_KVM] = {
409 .feat_names = kvm_feature_name,
410 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
411 .tcg_features = TCG_KVM_FEATURES,
413 [FEAT_SVM] = {
414 .feat_names = svm_feature_name,
415 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
416 .tcg_features = TCG_SVM_FEATURES,
418 [FEAT_7_0_EBX] = {
419 .feat_names = cpuid_7_0_ebx_feature_name,
420 .cpuid_eax = 7,
421 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
422 .cpuid_reg = R_EBX,
423 .tcg_features = TCG_7_0_EBX_FEATURES,
425 [FEAT_7_0_ECX] = {
426 .feat_names = cpuid_7_0_ecx_feature_name,
427 .cpuid_eax = 7,
428 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
429 .cpuid_reg = R_ECX,
430 .tcg_features = TCG_7_0_ECX_FEATURES,
432 [FEAT_8000_0007_EDX] = {
433 .feat_names = cpuid_apm_edx_feature_name,
434 .cpuid_eax = 0x80000007,
435 .cpuid_reg = R_EDX,
436 .tcg_features = TCG_APM_FEATURES,
437 .unmigratable_flags = CPUID_APM_INVTSC,
439 [FEAT_XSAVE] = {
440 .feat_names = cpuid_xsave_feature_name,
441 .cpuid_eax = 0xd,
442 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
443 .cpuid_reg = R_EAX,
444 .tcg_features = TCG_XSAVE_FEATURES,
446 [FEAT_6_EAX] = {
447 .feat_names = cpuid_6_feature_name,
448 .cpuid_eax = 6, .cpuid_reg = R_EAX,
449 .tcg_features = TCG_6_EAX_FEATURES,
/* Maps a 32-bit register index (R_* constant) to its printable name and
 * the corresponding QAPI enum value. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
460 #define REGISTER(reg) \
461 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
462 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
463 REGISTER(EAX),
464 REGISTER(ECX),
465 REGISTER(EDX),
466 REGISTER(EBX),
467 REGISTER(ESP),
468 REGISTER(EBP),
469 REGISTER(ESI),
470 REGISTER(EDI),
472 #undef REGISTER
474 const ExtSaveArea x86_ext_save_areas[] = {
475 [XSTATE_YMM_BIT] =
476 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
477 .offset = offsetof(X86XSaveArea, avx_state),
478 .size = sizeof(XSaveAVX) },
479 [XSTATE_BNDREGS_BIT] =
480 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
481 .offset = offsetof(X86XSaveArea, bndreg_state),
482 .size = sizeof(XSaveBNDREG) },
483 [XSTATE_BNDCSR_BIT] =
484 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
485 .offset = offsetof(X86XSaveArea, bndcsr_state),
486 .size = sizeof(XSaveBNDCSR) },
487 [XSTATE_OPMASK_BIT] =
488 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
489 .offset = offsetof(X86XSaveArea, opmask_state),
490 .size = sizeof(XSaveOpmask) },
491 [XSTATE_ZMM_Hi256_BIT] =
492 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
493 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
494 .size = sizeof(XSaveZMM_Hi256) },
495 [XSTATE_Hi16_ZMM_BIT] =
496 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
497 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
498 .size = sizeof(XSaveHi16_ZMM) },
499 [XSTATE_PKRU_BIT] =
500 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
501 .offset = offsetof(X86XSaveArea, pkru_state),
502 .size = sizeof(XSavePKRU) },
505 const char *get_register_name_32(unsigned int reg)
507 if (reg >= CPU_NB_REGS32) {
508 return NULL;
510 return x86_reg_info_32[reg].name;
514 * Returns the set of feature flags that are supported and migratable by
515 * QEMU, for a given FeatureWord.
517 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
519 FeatureWordInfo *wi = &feature_word_info[w];
520 uint32_t r = 0;
521 int i;
523 for (i = 0; i < 32; i++) {
524 uint32_t f = 1U << i;
525 /* If the feature name is unknown, it is not supported by QEMU yet */
526 if (!wi->feat_names[i]) {
527 continue;
529 /* Skip features known to QEMU, but explicitly marked as unmigratable */
530 if (wi->unmigratable_flags & f) {
531 continue;
533 r |= f;
535 return r;
538 void host_cpuid(uint32_t function, uint32_t count,
539 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
541 uint32_t vec[4];
543 #ifdef __x86_64__
544 asm volatile("cpuid"
545 : "=a"(vec[0]), "=b"(vec[1]),
546 "=c"(vec[2]), "=d"(vec[3])
547 : "0"(function), "c"(count) : "cc");
548 #elif defined(__i386__)
549 asm volatile("pusha \n\t"
550 "cpuid \n\t"
551 "mov %%eax, 0(%2) \n\t"
552 "mov %%ebx, 4(%2) \n\t"
553 "mov %%ecx, 8(%2) \n\t"
554 "mov %%edx, 12(%2) \n\t"
555 "popa"
556 : : "a"(function), "c"(count), "S"(vec)
557 : "memory", "cc");
558 #else
559 abort();
560 #endif
562 if (eax)
563 *eax = vec[0];
564 if (ebx)
565 *ebx = vec[1];
566 if (ecx)
567 *ecx = vec[2];
568 if (edx)
569 *edx = vec[3];
572 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    for (;;) {
        /* Mismatch or NUL in either string: ordinary lexical result */
        if (!*s1 || !*s2 || *s1 != *s2) {
            return *s1 - *s2;
        }
        ++s1;
        ++s2;
        if (s1 == e1 && s2 == e2) {
            return 0;               /* both substrings consumed: equal */
        } else if (s1 == e1) {
            return *s2;             /* s1 exhausted first: s1 < s2 */
        } else if (s2 == e2) {
            return *s1;             /* s2 exhausted first: s1 > s2 */
        }
    }
}
/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right. Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *p, *q;

    for (q = p = altstr; ; ) {
        /* Advance p to the end of the current alternative */
        while (*p && *p != '|') {
            ++p;
        }
        /* Empty alternative matches empty input; otherwise compare */
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p))) {
            return 0;
        }
        if (!*p) {
            return 1;   /* no more alternatives: no match */
        }
        q = ++p;        /* skip '|' and try the next alternative */
    }
}
/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    uint32_t mask;
    const char **name;
    bool found = false;

    /* Walk all 32 bits; keep scanning so every alias match is set */
    for (mask = 1, name = featureset; mask; mask <<= 1, ++name) {
        if (*name && !altcmp(s, e, *name)) {
            *pval |= mask;
            found = true;
        }
    }
    return found;
}
635 static void add_flagname_to_bitmaps(const char *flagname,
636 FeatureWordArray words,
637 Error **errp)
639 FeatureWord w;
640 for (w = 0; w < FEATURE_WORDS; w++) {
641 FeatureWordInfo *wi = &feature_word_info[w];
642 if (wi->feat_names &&
643 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
644 break;
647 if (w == FEATURE_WORDS) {
648 error_setg(errp, "CPU feature %s not found", flagname);
652 /* CPU class name definitions: */
654 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
655 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
665 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
667 ObjectClass *oc;
668 char *typename;
670 if (cpu_model == NULL) {
671 return NULL;
674 typename = x86_cpu_type_name(cpu_model);
675 oc = object_class_by_name(typename);
676 g_free(typename);
677 return oc;
680 struct X86CPUDefinition {
681 const char *name;
682 uint32_t level;
683 uint32_t xlevel;
684 uint32_t xlevel2;
685 /* vendor is zero-terminated, 12 character ASCII string */
686 char vendor[CPUID_VENDOR_SZ + 1];
687 int family;
688 int model;
689 int stepping;
690 FeatureWordArray features;
691 char model_id[48];
694 static X86CPUDefinition builtin_x86_defs[] = {
696 .name = "qemu64",
697 .level = 0xd,
698 .vendor = CPUID_VENDOR_AMD,
699 .family = 6,
700 .model = 6,
701 .stepping = 3,
702 .features[FEAT_1_EDX] =
703 PPRO_FEATURES |
704 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
705 CPUID_PSE36,
706 .features[FEAT_1_ECX] =
707 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
708 .features[FEAT_8000_0001_EDX] =
709 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
710 .features[FEAT_8000_0001_ECX] =
711 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
712 .xlevel = 0x8000000A,
713 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
716 .name = "phenom",
717 .level = 5,
718 .vendor = CPUID_VENDOR_AMD,
719 .family = 16,
720 .model = 2,
721 .stepping = 3,
722 /* Missing: CPUID_HT */
723 .features[FEAT_1_EDX] =
724 PPRO_FEATURES |
725 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
726 CPUID_PSE36 | CPUID_VME,
727 .features[FEAT_1_ECX] =
728 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
729 CPUID_EXT_POPCNT,
730 .features[FEAT_8000_0001_EDX] =
731 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
732 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
733 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
734 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
735 CPUID_EXT3_CR8LEG,
736 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
737 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
738 .features[FEAT_8000_0001_ECX] =
739 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
740 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
741 /* Missing: CPUID_SVM_LBRV */
742 .features[FEAT_SVM] =
743 CPUID_SVM_NPT,
744 .xlevel = 0x8000001A,
745 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
748 .name = "core2duo",
749 .level = 10,
750 .vendor = CPUID_VENDOR_INTEL,
751 .family = 6,
752 .model = 15,
753 .stepping = 11,
754 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
755 .features[FEAT_1_EDX] =
756 PPRO_FEATURES |
757 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
758 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
759 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
760 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
761 .features[FEAT_1_ECX] =
762 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
763 CPUID_EXT_CX16,
764 .features[FEAT_8000_0001_EDX] =
765 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
766 .features[FEAT_8000_0001_ECX] =
767 CPUID_EXT3_LAHF_LM,
768 .xlevel = 0x80000008,
769 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
772 .name = "kvm64",
773 .level = 0xd,
774 .vendor = CPUID_VENDOR_INTEL,
775 .family = 15,
776 .model = 6,
777 .stepping = 1,
778 /* Missing: CPUID_HT */
779 .features[FEAT_1_EDX] =
780 PPRO_FEATURES | CPUID_VME |
781 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
782 CPUID_PSE36,
783 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
784 .features[FEAT_1_ECX] =
785 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
786 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
787 .features[FEAT_8000_0001_EDX] =
788 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
789 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
790 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
791 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
792 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
793 .features[FEAT_8000_0001_ECX] =
795 .xlevel = 0x80000008,
796 .model_id = "Common KVM processor"
799 .name = "qemu32",
800 .level = 4,
801 .vendor = CPUID_VENDOR_INTEL,
802 .family = 6,
803 .model = 6,
804 .stepping = 3,
805 .features[FEAT_1_EDX] =
806 PPRO_FEATURES,
807 .features[FEAT_1_ECX] =
808 CPUID_EXT_SSE3,
809 .xlevel = 0x80000004,
810 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
813 .name = "kvm32",
814 .level = 5,
815 .vendor = CPUID_VENDOR_INTEL,
816 .family = 15,
817 .model = 6,
818 .stepping = 1,
819 .features[FEAT_1_EDX] =
820 PPRO_FEATURES | CPUID_VME |
821 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
822 .features[FEAT_1_ECX] =
823 CPUID_EXT_SSE3,
824 .features[FEAT_8000_0001_ECX] =
826 .xlevel = 0x80000008,
827 .model_id = "Common 32-bit KVM processor"
830 .name = "coreduo",
831 .level = 10,
832 .vendor = CPUID_VENDOR_INTEL,
833 .family = 6,
834 .model = 14,
835 .stepping = 8,
836 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
837 .features[FEAT_1_EDX] =
838 PPRO_FEATURES | CPUID_VME |
839 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
840 CPUID_SS,
841 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
842 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
843 .features[FEAT_1_ECX] =
844 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
845 .features[FEAT_8000_0001_EDX] =
846 CPUID_EXT2_NX,
847 .xlevel = 0x80000008,
848 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
851 .name = "486",
852 .level = 1,
853 .vendor = CPUID_VENDOR_INTEL,
854 .family = 4,
855 .model = 8,
856 .stepping = 0,
857 .features[FEAT_1_EDX] =
858 I486_FEATURES,
859 .xlevel = 0,
862 .name = "pentium",
863 .level = 1,
864 .vendor = CPUID_VENDOR_INTEL,
865 .family = 5,
866 .model = 4,
867 .stepping = 3,
868 .features[FEAT_1_EDX] =
869 PENTIUM_FEATURES,
870 .xlevel = 0,
873 .name = "pentium2",
874 .level = 2,
875 .vendor = CPUID_VENDOR_INTEL,
876 .family = 6,
877 .model = 5,
878 .stepping = 2,
879 .features[FEAT_1_EDX] =
880 PENTIUM2_FEATURES,
881 .xlevel = 0,
884 .name = "pentium3",
885 .level = 3,
886 .vendor = CPUID_VENDOR_INTEL,
887 .family = 6,
888 .model = 7,
889 .stepping = 3,
890 .features[FEAT_1_EDX] =
891 PENTIUM3_FEATURES,
892 .xlevel = 0,
895 .name = "athlon",
896 .level = 2,
897 .vendor = CPUID_VENDOR_AMD,
898 .family = 6,
899 .model = 2,
900 .stepping = 3,
901 .features[FEAT_1_EDX] =
902 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
903 CPUID_MCA,
904 .features[FEAT_8000_0001_EDX] =
905 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
906 .xlevel = 0x80000008,
907 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
910 .name = "n270",
911 .level = 10,
912 .vendor = CPUID_VENDOR_INTEL,
913 .family = 6,
914 .model = 28,
915 .stepping = 2,
916 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
917 .features[FEAT_1_EDX] =
918 PPRO_FEATURES |
919 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
920 CPUID_ACPI | CPUID_SS,
921 /* Some CPUs got no CPUID_SEP */
922 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
923 * CPUID_EXT_XTPR */
924 .features[FEAT_1_ECX] =
925 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
926 CPUID_EXT_MOVBE,
927 .features[FEAT_8000_0001_EDX] =
928 CPUID_EXT2_NX,
929 .features[FEAT_8000_0001_ECX] =
930 CPUID_EXT3_LAHF_LM,
931 .xlevel = 0x80000008,
932 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
935 .name = "Conroe",
936 .level = 10,
937 .vendor = CPUID_VENDOR_INTEL,
938 .family = 6,
939 .model = 15,
940 .stepping = 3,
941 .features[FEAT_1_EDX] =
942 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
943 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
944 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
945 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
946 CPUID_DE | CPUID_FP87,
947 .features[FEAT_1_ECX] =
948 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
949 .features[FEAT_8000_0001_EDX] =
950 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
951 .features[FEAT_8000_0001_ECX] =
952 CPUID_EXT3_LAHF_LM,
953 .xlevel = 0x80000008,
954 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
957 .name = "Penryn",
958 .level = 10,
959 .vendor = CPUID_VENDOR_INTEL,
960 .family = 6,
961 .model = 23,
962 .stepping = 3,
963 .features[FEAT_1_EDX] =
964 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
965 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
966 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
967 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
968 CPUID_DE | CPUID_FP87,
969 .features[FEAT_1_ECX] =
970 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
971 CPUID_EXT_SSE3,
972 .features[FEAT_8000_0001_EDX] =
973 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
974 .features[FEAT_8000_0001_ECX] =
975 CPUID_EXT3_LAHF_LM,
976 .xlevel = 0x80000008,
977 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
980 .name = "Nehalem",
981 .level = 11,
982 .vendor = CPUID_VENDOR_INTEL,
983 .family = 6,
984 .model = 26,
985 .stepping = 3,
986 .features[FEAT_1_EDX] =
987 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
988 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
989 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
990 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
991 CPUID_DE | CPUID_FP87,
992 .features[FEAT_1_ECX] =
993 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
994 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
995 .features[FEAT_8000_0001_EDX] =
996 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
997 .features[FEAT_8000_0001_ECX] =
998 CPUID_EXT3_LAHF_LM,
999 .xlevel = 0x80000008,
1000 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1003 .name = "Westmere",
1004 .level = 11,
1005 .vendor = CPUID_VENDOR_INTEL,
1006 .family = 6,
1007 .model = 44,
1008 .stepping = 1,
1009 .features[FEAT_1_EDX] =
1010 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1011 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1012 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1013 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1014 CPUID_DE | CPUID_FP87,
1015 .features[FEAT_1_ECX] =
1016 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1017 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1018 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1019 .features[FEAT_8000_0001_EDX] =
1020 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1021 .features[FEAT_8000_0001_ECX] =
1022 CPUID_EXT3_LAHF_LM,
1023 .features[FEAT_6_EAX] =
1024 CPUID_6_EAX_ARAT,
1025 .xlevel = 0x80000008,
1026 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1029 .name = "SandyBridge",
1030 .level = 0xd,
1031 .vendor = CPUID_VENDOR_INTEL,
1032 .family = 6,
1033 .model = 42,
1034 .stepping = 1,
1035 .features[FEAT_1_EDX] =
1036 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1037 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1038 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1039 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1040 CPUID_DE | CPUID_FP87,
1041 .features[FEAT_1_ECX] =
1042 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1043 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1044 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1045 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1046 CPUID_EXT_SSE3,
1047 .features[FEAT_8000_0001_EDX] =
1048 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1049 CPUID_EXT2_SYSCALL,
1050 .features[FEAT_8000_0001_ECX] =
1051 CPUID_EXT3_LAHF_LM,
1052 .features[FEAT_XSAVE] =
1053 CPUID_XSAVE_XSAVEOPT,
1054 .features[FEAT_6_EAX] =
1055 CPUID_6_EAX_ARAT,
1056 .xlevel = 0x80000008,
1057 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1060 .name = "IvyBridge",
1061 .level = 0xd,
1062 .vendor = CPUID_VENDOR_INTEL,
1063 .family = 6,
1064 .model = 58,
1065 .stepping = 9,
1066 .features[FEAT_1_EDX] =
1067 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1068 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1069 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1070 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1071 CPUID_DE | CPUID_FP87,
1072 .features[FEAT_1_ECX] =
1073 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1074 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1075 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1076 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1077 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1078 .features[FEAT_7_0_EBX] =
1079 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1080 CPUID_7_0_EBX_ERMS,
1081 .features[FEAT_8000_0001_EDX] =
1082 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1083 CPUID_EXT2_SYSCALL,
1084 .features[FEAT_8000_0001_ECX] =
1085 CPUID_EXT3_LAHF_LM,
1086 .features[FEAT_XSAVE] =
1087 CPUID_XSAVE_XSAVEOPT,
1088 .features[FEAT_6_EAX] =
1089 CPUID_6_EAX_ARAT,
1090 .xlevel = 0x80000008,
1091 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1094 .name = "Haswell-noTSX",
1095 .level = 0xd,
1096 .vendor = CPUID_VENDOR_INTEL,
1097 .family = 6,
1098 .model = 60,
1099 .stepping = 1,
1100 .features[FEAT_1_EDX] =
1101 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1102 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1103 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1104 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1105 CPUID_DE | CPUID_FP87,
1106 .features[FEAT_1_ECX] =
1107 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1108 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1109 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1110 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1111 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1112 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1113 .features[FEAT_8000_0001_EDX] =
1114 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1115 CPUID_EXT2_SYSCALL,
1116 .features[FEAT_8000_0001_ECX] =
1117 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1118 .features[FEAT_7_0_EBX] =
1119 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1120 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1121 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1122 .features[FEAT_XSAVE] =
1123 CPUID_XSAVE_XSAVEOPT,
1124 .features[FEAT_6_EAX] =
1125 CPUID_6_EAX_ARAT,
1126 .xlevel = 0x80000008,
1127 .model_id = "Intel Core Processor (Haswell, no TSX)",
1128 }, {
1129 .name = "Haswell",
1130 .level = 0xd,
1131 .vendor = CPUID_VENDOR_INTEL,
1132 .family = 6,
1133 .model = 60,
1134 .stepping = 1,
1135 .features[FEAT_1_EDX] =
1136 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1137 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1138 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1139 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1140 CPUID_DE | CPUID_FP87,
1141 .features[FEAT_1_ECX] =
1142 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1143 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1144 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1145 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1146 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1147 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1148 .features[FEAT_8000_0001_EDX] =
1149 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1150 CPUID_EXT2_SYSCALL,
1151 .features[FEAT_8000_0001_ECX] =
1152 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1153 .features[FEAT_7_0_EBX] =
1154 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1155 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1156 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1157 CPUID_7_0_EBX_RTM,
1158 .features[FEAT_XSAVE] =
1159 CPUID_XSAVE_XSAVEOPT,
1160 .features[FEAT_6_EAX] =
1161 CPUID_6_EAX_ARAT,
1162 .xlevel = 0x80000008,
1163 .model_id = "Intel Core Processor (Haswell)",
1166 .name = "Broadwell-noTSX",
1167 .level = 0xd,
1168 .vendor = CPUID_VENDOR_INTEL,
1169 .family = 6,
1170 .model = 61,
1171 .stepping = 2,
1172 .features[FEAT_1_EDX] =
1173 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1174 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1175 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1176 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1177 CPUID_DE | CPUID_FP87,
1178 .features[FEAT_1_ECX] =
1179 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1180 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1181 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1182 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1183 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1184 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1185 .features[FEAT_8000_0001_EDX] =
1186 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1187 CPUID_EXT2_SYSCALL,
1188 .features[FEAT_8000_0001_ECX] =
1189 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1190 .features[FEAT_7_0_EBX] =
1191 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1192 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1193 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1194 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1195 CPUID_7_0_EBX_SMAP,
1196 .features[FEAT_XSAVE] =
1197 CPUID_XSAVE_XSAVEOPT,
1198 .features[FEAT_6_EAX] =
1199 CPUID_6_EAX_ARAT,
1200 .xlevel = 0x80000008,
1201 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1204 .name = "Broadwell",
1205 .level = 0xd,
1206 .vendor = CPUID_VENDOR_INTEL,
1207 .family = 6,
1208 .model = 61,
1209 .stepping = 2,
1210 .features[FEAT_1_EDX] =
1211 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1212 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1213 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1214 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1215 CPUID_DE | CPUID_FP87,
1216 .features[FEAT_1_ECX] =
1217 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1218 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1219 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1220 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1221 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1222 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1223 .features[FEAT_8000_0001_EDX] =
1224 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1225 CPUID_EXT2_SYSCALL,
1226 .features[FEAT_8000_0001_ECX] =
1227 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1228 .features[FEAT_7_0_EBX] =
1229 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1230 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1231 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1232 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1233 CPUID_7_0_EBX_SMAP,
1234 .features[FEAT_XSAVE] =
1235 CPUID_XSAVE_XSAVEOPT,
1236 .features[FEAT_6_EAX] =
1237 CPUID_6_EAX_ARAT,
1238 .xlevel = 0x80000008,
1239 .model_id = "Intel Core Processor (Broadwell)",
1242 .name = "Skylake-Client",
1243 .level = 0xd,
1244 .vendor = CPUID_VENDOR_INTEL,
1245 .family = 6,
1246 .model = 94,
1247 .stepping = 3,
1248 .features[FEAT_1_EDX] =
1249 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1250 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1251 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1252 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1253 CPUID_DE | CPUID_FP87,
1254 .features[FEAT_1_ECX] =
1255 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1256 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1257 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1258 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1259 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1260 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1261 .features[FEAT_8000_0001_EDX] =
1262 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1263 CPUID_EXT2_SYSCALL,
1264 .features[FEAT_8000_0001_ECX] =
1265 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1266 .features[FEAT_7_0_EBX] =
1267 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1268 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1269 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1270 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1271 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1272 /* Missing: XSAVES (not supported by some Linux versions,
1273 * including v4.1 to v4.6).
1274 * KVM doesn't yet expose any XSAVES state save component,
1275 * and the only one defined in Skylake (processor tracing)
1276 * probably will block migration anyway.
1278 .features[FEAT_XSAVE] =
1279 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1280 CPUID_XSAVE_XGETBV1,
1281 .features[FEAT_6_EAX] =
1282 CPUID_6_EAX_ARAT,
1283 .xlevel = 0x80000008,
1284 .model_id = "Intel Core Processor (Skylake)",
1287 .name = "Opteron_G1",
1288 .level = 5,
1289 .vendor = CPUID_VENDOR_AMD,
1290 .family = 15,
1291 .model = 6,
1292 .stepping = 1,
1293 .features[FEAT_1_EDX] =
1294 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1295 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1296 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1297 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1298 CPUID_DE | CPUID_FP87,
1299 .features[FEAT_1_ECX] =
1300 CPUID_EXT_SSE3,
1301 .features[FEAT_8000_0001_EDX] =
1302 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1303 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1304 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1305 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1306 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1307 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1308 .xlevel = 0x80000008,
1309 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1312 .name = "Opteron_G2",
1313 .level = 5,
1314 .vendor = CPUID_VENDOR_AMD,
1315 .family = 15,
1316 .model = 6,
1317 .stepping = 1,
1318 .features[FEAT_1_EDX] =
1319 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1320 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1321 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1322 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1323 CPUID_DE | CPUID_FP87,
1324 .features[FEAT_1_ECX] =
1325 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1326 /* Missing: CPUID_EXT2_RDTSCP */
1327 .features[FEAT_8000_0001_EDX] =
1328 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1329 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1330 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1331 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1332 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1333 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1334 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1335 .features[FEAT_8000_0001_ECX] =
1336 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1337 .xlevel = 0x80000008,
1338 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1341 .name = "Opteron_G3",
1342 .level = 5,
1343 .vendor = CPUID_VENDOR_AMD,
1344 .family = 15,
1345 .model = 6,
1346 .stepping = 1,
1347 .features[FEAT_1_EDX] =
1348 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1349 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1350 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1351 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1352 CPUID_DE | CPUID_FP87,
1353 .features[FEAT_1_ECX] =
1354 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1355 CPUID_EXT_SSE3,
1356 /* Missing: CPUID_EXT2_RDTSCP */
1357 .features[FEAT_8000_0001_EDX] =
1358 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1359 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1360 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1361 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1362 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1363 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1364 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1365 .features[FEAT_8000_0001_ECX] =
1366 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1367 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1368 .xlevel = 0x80000008,
1369 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1372 .name = "Opteron_G4",
1373 .level = 0xd,
1374 .vendor = CPUID_VENDOR_AMD,
1375 .family = 21,
1376 .model = 1,
1377 .stepping = 2,
1378 .features[FEAT_1_EDX] =
1379 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1380 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1381 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1382 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1383 CPUID_DE | CPUID_FP87,
1384 .features[FEAT_1_ECX] =
1385 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1386 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1387 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1388 CPUID_EXT_SSE3,
1389 /* Missing: CPUID_EXT2_RDTSCP */
1390 .features[FEAT_8000_0001_EDX] =
1391 CPUID_EXT2_LM |
1392 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1393 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1394 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1395 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1396 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1397 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1398 .features[FEAT_8000_0001_ECX] =
1399 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1400 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1401 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1402 CPUID_EXT3_LAHF_LM,
1403 /* no xsaveopt! */
1404 .xlevel = 0x8000001A,
1405 .model_id = "AMD Opteron 62xx class CPU",
1408 .name = "Opteron_G5",
1409 .level = 0xd,
1410 .vendor = CPUID_VENDOR_AMD,
1411 .family = 21,
1412 .model = 2,
1413 .stepping = 0,
1414 .features[FEAT_1_EDX] =
1415 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1416 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1417 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1418 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1419 CPUID_DE | CPUID_FP87,
1420 .features[FEAT_1_ECX] =
1421 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1422 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1423 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1424 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1425 /* Missing: CPUID_EXT2_RDTSCP */
1426 .features[FEAT_8000_0001_EDX] =
1427 CPUID_EXT2_LM |
1428 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1429 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1430 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1431 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1432 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1433 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1434 .features[FEAT_8000_0001_ECX] =
1435 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1436 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1437 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1438 CPUID_EXT3_LAHF_LM,
1439 /* no xsaveopt! */
1440 .xlevel = 0x8000001A,
1441 .model_id = "AMD Opteron 63xx class CPU",
/* A (property name, property value) pair, used for tables of QOM
 * property assignments such as kvm_default_props. */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1449 /* KVM-specific features that are automatically added/removed
1450 * from all CPU models when KVM is enabled.
1452 static PropValue kvm_default_props[] = {
1453 { "kvmclock", "on" },
1454 { "kvm-nopiodelay", "on" },
1455 { "kvm-asyncpf", "on" },
1456 { "kvm-steal-time", "on" },
1457 { "kvm-pv-eoi", "on" },
1458 { "kvmclock-stable-bit", "on" },
1459 { "x2apic", "on" },
1460 { "acpi", "off" },
1461 { "monitor", "off" },
1462 { "svm", "off" },
1463 { NULL, NULL },
1466 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1468 PropValue *pv;
1469 for (pv = kvm_default_props; pv->prop; pv++) {
1470 if (!strcmp(pv->prop, prop)) {
1471 pv->value = value;
1472 break;
1476 /* It is valid to call this function only for properties that
1477 * are already present in the kvm_default_props table.
1479 assert(pv->prop);
1482 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1483 bool migratable_only);
1485 #ifdef CONFIG_KVM
/* Fill 'str' (at least 48 bytes) with the host CPU brand string,
 * read from CPUID leaves 0x80000002..0x80000004.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        /* Each leaf yields 16 bytes of the string in EAX..EDX */
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 + 0, &eax, 4);
        memcpy(str + i * 16 + 4, &ebx, 4);
        memcpy(str + i * 16 + 8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
1502 static X86CPUDefinition host_cpudef;
1504 static Property host_x86_cpu_properties[] = {
1505 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1506 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1507 DEFINE_PROP_END_OF_LIST()
1510 /* class_init for the "host" CPU model
1512 * This function may be called before KVM is initialized.
1514 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1516 DeviceClass *dc = DEVICE_CLASS(oc);
1517 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1518 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1520 xcc->kvm_required = true;
1522 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1523 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1525 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1526 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1527 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1528 host_cpudef.stepping = eax & 0x0F;
1530 cpu_x86_fill_model_id(host_cpudef.model_id);
1532 xcc->cpu_def = &host_cpudef;
1534 /* level, xlevel, xlevel2, and the feature words are initialized on
1535 * instance_init, because they require KVM to be initialized.
1538 dc->props = host_x86_cpu_properties;
1539 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1540 dc->cannot_destroy_with_object_finalize_yet = true;
1543 static void host_x86_cpu_initfn(Object *obj)
1545 X86CPU *cpu = X86_CPU(obj);
1546 CPUX86State *env = &cpu->env;
1547 KVMState *s = kvm_state;
1549 assert(kvm_enabled());
1551 /* We can't fill the features array here because we don't know yet if
1552 * "migratable" is true or false.
1554 cpu->host_features = true;
1556 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1557 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1558 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1560 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1563 static const TypeInfo host_x86_cpu_type_info = {
1564 .name = X86_CPU_TYPE_NAME("host"),
1565 .parent = TYPE_X86_CPU,
1566 .instance_init = host_x86_cpu_initfn,
1567 .class_init = host_x86_cpu_class_init,
1570 #endif
1572 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1574 FeatureWordInfo *f = &feature_word_info[w];
1575 int i;
1577 for (i = 0; i < 32; ++i) {
1578 if ((1UL << i) & mask) {
1579 const char *reg = get_register_name_32(f->cpuid_reg);
1580 assert(reg);
1581 fprintf(stderr, "warning: %s doesn't support requested feature: "
1582 "CPUID.%02XH:%s%s%s [bit %d]\n",
1583 kvm_enabled() ? "host" : "TCG",
1584 f->cpuid_eax, reg,
1585 f->feat_names[i] ? "." : "",
1586 f->feat_names[i] ? f->feat_names[i] : "", i);
1591 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1592 const char *name, void *opaque,
1593 Error **errp)
1595 X86CPU *cpu = X86_CPU(obj);
1596 CPUX86State *env = &cpu->env;
1597 int64_t value;
1599 value = (env->cpuid_version >> 8) & 0xf;
1600 if (value == 0xf) {
1601 value += (env->cpuid_version >> 20) & 0xff;
1603 visit_type_int(v, name, &value, errp);
1606 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1607 const char *name, void *opaque,
1608 Error **errp)
1610 X86CPU *cpu = X86_CPU(obj);
1611 CPUX86State *env = &cpu->env;
1612 const int64_t min = 0;
1613 const int64_t max = 0xff + 0xf;
1614 Error *local_err = NULL;
1615 int64_t value;
1617 visit_type_int(v, name, &value, &local_err);
1618 if (local_err) {
1619 error_propagate(errp, local_err);
1620 return;
1622 if (value < min || value > max) {
1623 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1624 name ? name : "null", value, min, max);
1625 return;
1628 env->cpuid_version &= ~0xff00f00;
1629 if (value > 0x0f) {
1630 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1631 } else {
1632 env->cpuid_version |= value << 8;
1636 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1637 const char *name, void *opaque,
1638 Error **errp)
1640 X86CPU *cpu = X86_CPU(obj);
1641 CPUX86State *env = &cpu->env;
1642 int64_t value;
1644 value = (env->cpuid_version >> 4) & 0xf;
1645 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1646 visit_type_int(v, name, &value, errp);
1649 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1650 const char *name, void *opaque,
1651 Error **errp)
1653 X86CPU *cpu = X86_CPU(obj);
1654 CPUX86State *env = &cpu->env;
1655 const int64_t min = 0;
1656 const int64_t max = 0xff;
1657 Error *local_err = NULL;
1658 int64_t value;
1660 visit_type_int(v, name, &value, &local_err);
1661 if (local_err) {
1662 error_propagate(errp, local_err);
1663 return;
1665 if (value < min || value > max) {
1666 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1667 name ? name : "null", value, min, max);
1668 return;
1671 env->cpuid_version &= ~0xf00f0;
1672 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1675 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1676 const char *name, void *opaque,
1677 Error **errp)
1679 X86CPU *cpu = X86_CPU(obj);
1680 CPUX86State *env = &cpu->env;
1681 int64_t value;
1683 value = env->cpuid_version & 0xf;
1684 visit_type_int(v, name, &value, errp);
1687 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1688 const char *name, void *opaque,
1689 Error **errp)
1691 X86CPU *cpu = X86_CPU(obj);
1692 CPUX86State *env = &cpu->env;
1693 const int64_t min = 0;
1694 const int64_t max = 0xf;
1695 Error *local_err = NULL;
1696 int64_t value;
1698 visit_type_int(v, name, &value, &local_err);
1699 if (local_err) {
1700 error_propagate(errp, local_err);
1701 return;
1703 if (value < min || value > max) {
1704 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1705 name ? name : "null", value, min, max);
1706 return;
1709 env->cpuid_version &= ~0xf;
1710 env->cpuid_version |= value & 0xf;
1713 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1715 X86CPU *cpu = X86_CPU(obj);
1716 CPUX86State *env = &cpu->env;
1717 char *value;
1719 value = g_malloc(CPUID_VENDOR_SZ + 1);
1720 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1721 env->cpuid_vendor3);
1722 return value;
1725 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1726 Error **errp)
1728 X86CPU *cpu = X86_CPU(obj);
1729 CPUX86State *env = &cpu->env;
1730 int i;
1732 if (strlen(value) != CPUID_VENDOR_SZ) {
1733 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1734 return;
1737 env->cpuid_vendor1 = 0;
1738 env->cpuid_vendor2 = 0;
1739 env->cpuid_vendor3 = 0;
1740 for (i = 0; i < 4; i++) {
1741 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1742 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1743 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1747 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1749 X86CPU *cpu = X86_CPU(obj);
1750 CPUX86State *env = &cpu->env;
1751 char *value;
1752 int i;
1754 value = g_malloc(48 + 1);
1755 for (i = 0; i < 48; i++) {
1756 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1758 value[48] = '\0';
1759 return value;
1762 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1763 Error **errp)
1765 X86CPU *cpu = X86_CPU(obj);
1766 CPUX86State *env = &cpu->env;
1767 int c, len, i;
1769 if (model_id == NULL) {
1770 model_id = "";
1772 len = strlen(model_id);
1773 memset(env->cpuid_model, 0, 48);
1774 for (i = 0; i < 48; i++) {
1775 if (i >= len) {
1776 c = '\0';
1777 } else {
1778 c = (uint8_t)model_id[i];
1780 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1784 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1785 void *opaque, Error **errp)
1787 X86CPU *cpu = X86_CPU(obj);
1788 int64_t value;
1790 value = cpu->env.tsc_khz * 1000;
1791 visit_type_int(v, name, &value, errp);
1794 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1795 void *opaque, Error **errp)
1797 X86CPU *cpu = X86_CPU(obj);
1798 const int64_t min = 0;
1799 const int64_t max = INT64_MAX;
1800 Error *local_err = NULL;
1801 int64_t value;
1803 visit_type_int(v, name, &value, &local_err);
1804 if (local_err) {
1805 error_propagate(errp, local_err);
1806 return;
1808 if (value < min || value > max) {
1809 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1810 name ? name : "null", value, min, max);
1811 return;
1814 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1817 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1818 void *opaque, Error **errp)
1820 X86CPU *cpu = X86_CPU(obj);
1821 int64_t value = cpu->apic_id;
1823 visit_type_int(v, name, &value, errp);
1826 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1827 void *opaque, Error **errp)
1829 X86CPU *cpu = X86_CPU(obj);
1830 DeviceState *dev = DEVICE(obj);
1831 const int64_t min = 0;
1832 const int64_t max = UINT32_MAX;
1833 Error *error = NULL;
1834 int64_t value;
1836 if (dev->realized) {
1837 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1838 "it was realized", name, object_get_typename(obj));
1839 return;
1842 visit_type_int(v, name, &value, &error);
1843 if (error) {
1844 error_propagate(errp, error);
1845 return;
1847 if (value < min || value > max) {
1848 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1849 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1850 object_get_typename(obj), name, value, min, max);
1851 return;
1854 if ((value != cpu->apic_id) && cpu_exists(value)) {
1855 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1856 return;
1858 cpu->apic_id = value;
1861 /* Generic getter for "feature-words" and "filtered-features" properties */
1862 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1863 const char *name, void *opaque,
1864 Error **errp)
1866 uint32_t *array = (uint32_t *)opaque;
1867 FeatureWord w;
1868 Error *err = NULL;
1869 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1870 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1871 X86CPUFeatureWordInfoList *list = NULL;
1873 for (w = 0; w < FEATURE_WORDS; w++) {
1874 FeatureWordInfo *wi = &feature_word_info[w];
1875 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1876 qwi->cpuid_input_eax = wi->cpuid_eax;
1877 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1878 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1879 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1880 qwi->features = array[w];
1882 /* List will be in reverse order, but order shouldn't matter */
1883 list_entries[w].next = list;
1884 list_entries[w].value = &word_infos[w];
1885 list = &list_entries[w];
1888 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
1889 error_propagate(errp, err);
1892 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1893 void *opaque, Error **errp)
1895 X86CPU *cpu = X86_CPU(obj);
1896 int64_t value = cpu->hyperv_spinlock_attempts;
1898 visit_type_int(v, name, &value, errp);
1901 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1902 void *opaque, Error **errp)
1904 const int64_t min = 0xFFF;
1905 const int64_t max = UINT_MAX;
1906 X86CPU *cpu = X86_CPU(obj);
1907 Error *err = NULL;
1908 int64_t value;
1910 visit_type_int(v, name, &value, &err);
1911 if (err) {
1912 error_propagate(errp, err);
1913 return;
1916 if (value < min || value > max) {
1917 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1918 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1919 object_get_typename(obj), name ? name : "null",
1920 value, min, max);
1921 return;
1923 cpu->hyperv_spinlock_attempts = value;
1926 static PropertyInfo qdev_prop_spinlocks = {
1927 .name = "int",
1928 .get = x86_get_hv_spinlocks,
1929 .set = x86_set_hv_spinlocks,
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = strchr(s, '_'); p; p = strchr(p, '_')) {
        *p = '-';
    }
}
1942 /* Parse "+feature,-feature,feature=foo" CPU feature string
1944 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1945 Error **errp)
1947 X86CPU *cpu = X86_CPU(cs);
1948 char *featurestr; /* Single 'key=value" string being parsed */
1949 FeatureWord w;
1950 /* Features to be added */
1951 FeatureWordArray plus_features = { 0 };
1952 /* Features to be removed */
1953 FeatureWordArray minus_features = { 0 };
1954 uint32_t numvalue;
1955 CPUX86State *env = &cpu->env;
1956 Error *local_err = NULL;
1958 featurestr = features ? strtok(features, ",") : NULL;
1960 while (featurestr) {
1961 char *val;
1962 if (featurestr[0] == '+') {
1963 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1964 } else if (featurestr[0] == '-') {
1965 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1966 } else if ((val = strchr(featurestr, '='))) {
1967 *val = 0; val++;
1968 feat2prop(featurestr);
1969 if (!strcmp(featurestr, "xlevel")) {
1970 char *err;
1971 char num[32];
1973 numvalue = strtoul(val, &err, 0);
1974 if (!*val || *err) {
1975 error_setg(errp, "bad numerical value %s", val);
1976 return;
1978 if (numvalue < 0x80000000) {
1979 error_report("xlevel value shall always be >= 0x80000000"
1980 ", fixup will be removed in future versions");
1981 numvalue += 0x80000000;
1983 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1984 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1985 } else if (!strcmp(featurestr, "tsc-freq")) {
1986 int64_t tsc_freq;
1987 char *err;
1988 char num[32];
1990 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1991 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1992 if (tsc_freq < 0 || *err) {
1993 error_setg(errp, "bad numerical value %s", val);
1994 return;
1996 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1997 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1998 &local_err);
1999 } else if (!strcmp(featurestr, "hv-spinlocks")) {
2000 char *err;
2001 const int min = 0xFFF;
2002 char num[32];
2003 numvalue = strtoul(val, &err, 0);
2004 if (!*val || *err) {
2005 error_setg(errp, "bad numerical value %s", val);
2006 return;
2008 if (numvalue < min) {
2009 error_report("hv-spinlocks value shall always be >= 0x%x"
2010 ", fixup will be removed in future versions",
2011 min);
2012 numvalue = min;
2014 snprintf(num, sizeof(num), "%" PRId32, numvalue);
2015 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
2016 } else {
2017 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
2019 } else {
2020 feat2prop(featurestr);
2021 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
2023 if (local_err) {
2024 error_propagate(errp, local_err);
2025 return;
2027 featurestr = strtok(NULL, ",");
2030 if (cpu->host_features) {
2031 for (w = 0; w < FEATURE_WORDS; w++) {
2032 env->features[w] =
2033 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2037 for (w = 0; w < FEATURE_WORDS; w++) {
2038 env->features[w] |= plus_features[w];
2039 env->features[w] &= ~minus_features[w];
2043 /* Print all cpuid feature names in featureset
2045 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2047 int bit;
2048 bool first = true;
2050 for (bit = 0; bit < 32; bit++) {
2051 if (featureset[bit]) {
2052 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2053 first = false;
2058 /* generate CPU information. */
2059 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2061 X86CPUDefinition *def;
2062 char buf[256];
2063 int i;
2065 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2066 def = &builtin_x86_defs[i];
2067 snprintf(buf, sizeof(buf), "%s", def->name);
2068 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2070 #ifdef CONFIG_KVM
2071 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2072 "KVM processor with all supported host features "
2073 "(only available in KVM mode)");
2074 #endif
2076 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2077 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2078 FeatureWordInfo *fw = &feature_word_info[i];
2080 (*cpu_fprintf)(f, " ");
2081 listflags(f, cpu_fprintf, fw->feat_names);
2082 (*cpu_fprintf)(f, "\n");
2086 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2088 CpuDefinitionInfoList *cpu_list = NULL;
2089 X86CPUDefinition *def;
2090 int i;
2092 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2093 CpuDefinitionInfoList *entry;
2094 CpuDefinitionInfo *info;
2096 def = &builtin_x86_defs[i];
2097 info = g_malloc0(sizeof(*info));
2098 info->name = g_strdup(def->name);
2100 entry = g_malloc0(sizeof(*entry));
2101 entry->value = info;
2102 entry->next = cpu_list;
2103 cpu_list = entry;
2106 return cpu_list;
2109 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2110 bool migratable_only)
2112 FeatureWordInfo *wi = &feature_word_info[w];
2113 uint32_t r;
2115 if (kvm_enabled()) {
2116 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2117 wi->cpuid_ecx,
2118 wi->cpuid_reg);
2119 } else if (tcg_enabled()) {
2120 r = wi->tcg_features;
2121 } else {
2122 return ~0;
2124 if (migratable_only) {
2125 r &= x86_cpu_get_migratable_flags(w);
2127 return r;
2131 * Filters CPU feature words based on host availability of each feature.
2133 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2135 static int x86_cpu_filter_features(X86CPU *cpu)
2137 CPUX86State *env = &cpu->env;
2138 FeatureWord w;
2139 int rv = 0;
2141 for (w = 0; w < FEATURE_WORDS; w++) {
2142 uint32_t host_feat =
2143 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2144 uint32_t requested_features = env->features[w];
2145 env->features[w] &= host_feat;
2146 cpu->filtered_features[w] = requested_features & ~env->features[w];
2147 if (cpu->filtered_features[w]) {
2148 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2149 report_unavailable_features(w, cpu->filtered_features[w]);
2151 rv = 1;
2155 return rv;
2158 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2160 PropValue *pv;
2161 for (pv = props; pv->prop; pv++) {
2162 if (!pv->value) {
2163 continue;
2165 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2166 &error_abort);
/* Load data from X86CPUDefinition
 *
 * Copies the static model definition @def into @cpu.  Numeric/string
 * fields go through QOM properties so their setters run; errors from
 * the individual setters accumulate in @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    object_property_set_int(OBJECT(cpu), def->level, "level", errp);
    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* Feature words are copied directly, not via properties */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    }

    /* Always advertise that we are running under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
}
2220 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2222 X86CPU *cpu = NULL;
2223 X86CPUClass *xcc;
2224 ObjectClass *oc;
2225 gchar **model_pieces;
2226 char *name, *features;
2227 Error *error = NULL;
2229 model_pieces = g_strsplit(cpu_model, ",", 2);
2230 if (!model_pieces[0]) {
2231 error_setg(&error, "Invalid/empty CPU model name");
2232 goto out;
2234 name = model_pieces[0];
2235 features = model_pieces[1];
2237 oc = x86_cpu_class_by_name(name);
2238 if (oc == NULL) {
2239 error_setg(&error, "Unable to find CPU definition: %s", name);
2240 goto out;
2242 xcc = X86_CPU_CLASS(oc);
2244 if (xcc->kvm_required && !kvm_enabled()) {
2245 error_setg(&error, "CPU model '%s' requires KVM", name);
2246 goto out;
2249 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2251 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2252 if (error) {
2253 goto out;
2256 out:
2257 if (error != NULL) {
2258 error_propagate(errp, error);
2259 if (cpu) {
2260 object_unref(OBJECT(cpu));
2261 cpu = NULL;
2264 g_strfreev(model_pieces);
2265 return cpu;
2268 X86CPU *cpu_x86_init(const char *cpu_model)
2270 Error *error = NULL;
2271 X86CPU *cpu;
2273 cpu = cpu_x86_create(cpu_model, &error);
2274 if (error) {
2275 goto out;
2278 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2280 out:
2281 if (error) {
2282 error_report_err(error);
2283 if (cpu != NULL) {
2284 object_unref(OBJECT(cpu));
2285 cpu = NULL;
2288 return cpu;
2291 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2293 X86CPUDefinition *cpudef = data;
2294 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2296 xcc->cpu_def = cpudef;
2299 static void x86_register_cpudef_type(X86CPUDefinition *def)
2301 char *typename = x86_cpu_type_name(def->name);
2302 TypeInfo ti = {
2303 .name = typename,
2304 .parent = TYPE_X86_CPU,
2305 .class_init = x86_cpu_cpudef_class_init,
2306 .class_data = def,
2309 type_register(&ti);
2310 g_free(typename);
#if !defined(CONFIG_USER_ONLY)

/* Mask the APIC bit out of CPUID[1].EDX, e.g. when no APIC is present. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */
/* Emulate the CPUID instruction: fill EAX/EBX/ECX/EDX for leaf @index,
 * sub-leaf @count, based on the guest CPU's configured feature words.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Highest basic leaf + vendor string */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, brand/CLFLUSH/APIC info, feature flags */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            /* OSXSAVE is reported dynamically from CR4 state */
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        *ecx = 0;
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                /* OSPKE is reported dynamically from CR4 state */
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xD: {
        KVMState *s = cs->kvm_state;
        uint64_t ena_mask;
        int i;

        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }
        /* ena_mask limits reported state components to what KVM supports */
        if (kvm_enabled()) {
            ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
            ena_mask <<= 32;
            ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
        } else {
            ena_mask = -1;
        }

        if (count == 0) {
            *ecx = 0x240; /* legacy FP+SSE area size */
            for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
                const ExtSaveArea *esa = &x86_ext_save_areas[i];
                if ((env->features[esa->feature] & esa->bits) == esa->bits
                    && ((ena_mask >> i) & 1) != 0) {
                    if (i < 32) {
                        *eax |= 1u << i;
                    } else {
                        *edx |= 1u << (i - 32);
                    }
                    /* ECX = size of the area needed for all enabled states */
                    *ecx = MAX(*ecx, esa->offset + esa->size);
                }
            }
            *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset sub-leaves */
            const ExtSaveArea *esa = &x86_ext_save_areas[count];
            if ((env->features[esa->feature] & esa->bits) == esa->bits
                && ((ena_mask >> count) & 1) != 0) {
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        /* Highest extended leaf + vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        *edx = ((L3_SIZE_KB/512) << 18) | \
               (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
               (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        break;
    case 0x80000007:
        /* Advanced Power Management Information */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
        /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
                *eax = 0x00000024; /* 36 bits physical */
            } else {
                *eax = 0x00000020; /* 32 bits physical */
            }
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        /* Centaur/VIA: highest extended leaf */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* CPUClass::reset()
 *
 * Puts the CPU into the architectural power-on/RESET state: real mode,
 * CS:IP = F000:FFF0, default segment/descriptor/FPU/MSR values.  The
 * CPUID configuration (everything from cpuid_level onwards) is preserved.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero only the dynamic state; fields from cpuid_level on are config */
    memset(env, 0, offsetof(CPUX86State, cpuid_level));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segments; CS base reflects the reset vector alias */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((env->features[esa->feature] & esa->bits) == esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
#ifndef CONFIG_USER_ONLY
/* True if this CPU is the bootstrap processor (BSP bit in the APIC base). */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif
2825 static void mce_init(X86CPU *cpu)
2827 CPUX86State *cenv = &cpu->env;
2828 unsigned int bank;
2830 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2831 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2832 (CPUID_MCE | CPUID_MCA)) {
2833 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2834 cenv->mcg_ctl = ~(uint64_t)0;
2835 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2836 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2841 #ifndef CONFIG_USER_ONLY
2842 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2844 APICCommonState *apic;
2845 const char *apic_type = "apic";
2847 if (kvm_apic_in_kernel()) {
2848 apic_type = "kvm-apic";
2849 } else if (xen_enabled()) {
2850 apic_type = "xen-apic";
2853 cpu->apic_state = DEVICE(object_new(apic_type));
2855 object_property_add_child(OBJECT(cpu), "apic",
2856 OBJECT(cpu->apic_state), NULL);
2857 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2858 /* TODO: convert to link<> */
2859 apic = APIC_COMMON(cpu->apic_state);
2860 apic->cpu = cpu;
2861 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
/* Realize the CPU's APIC (if one was created) and map its MMIO region.
 * The MMIO mapping is shared by all CPUs, so it is performed only once.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;  /* all CPUs share one MMIO window */

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
/* Machine-init-done hook: alias the machine's SMRAM region (if any) into
 * this CPU's private address space, disabled until SMM entry enables it.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
#else
/* No APIC in user-mode emulation: realize is a no-op. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif
/* Vendor checks against the configured CPUID vendor-string registers. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for X86CPU: validate configuration, filter features
 * against accelerator support, create the APIC/address spaces, start the
 * vcpu and reset it.  Ordering of the steps below is significant.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;  /* warn about AMD+HT only once per process */

    if (cpu->apic_id < 0) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /* Leaf 7 features require cpuid_level >= 7 to be visible */
    if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
        env->cpuid_level = 7;
    }

    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }


    cpu_exec_init(cs, &error_abort);

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram.  */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
/* Backing state for a boolean QOM property that toggles bit(s) in a
 * feature word; see x86_cpu_register_bit_prop().
 */
typedef struct BitProperty {
    uint32_t *ptr;   /* the uint32_t field the property controls */
    uint32_t mask;   /* bit(s) within *ptr covered by this property */
} BitProperty;
3033 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3034 void *opaque, Error **errp)
3036 BitProperty *fp = opaque;
3037 bool value = (*fp->ptr & fp->mask) == fp->mask;
3038 visit_type_bool(v, name, &value, errp);
3041 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3042 void *opaque, Error **errp)
3044 DeviceState *dev = DEVICE(obj);
3045 BitProperty *fp = opaque;
3046 Error *local_err = NULL;
3047 bool value;
3049 if (dev->realized) {
3050 qdev_prop_set_after_realize(dev, name, errp);
3051 return;
3054 visit_type_bool(v, name, &value, &local_err);
3055 if (local_err) {
3056 error_propagate(errp, local_err);
3057 return;
3060 if (value) {
3061 *fp->ptr |= fp->mask;
3062 } else {
3063 *fp->ptr &= ~fp->mask;
3067 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3068 void *opaque)
3070 BitProperty *prop = opaque;
3071 g_free(prop);
3074 /* Register a boolean property to get/set a single bit in a uint32_t field.
3076 * The same property name can be registered multiple times to make it affect
3077 * multiple bits in the same FeatureWord. In that case, the getter will return
3078 * true only if all bits are set.
3080 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3081 const char *prop_name,
3082 uint32_t *field,
3083 int bitnr)
3085 BitProperty *fp;
3086 ObjectProperty *op;
3087 uint32_t mask = (1UL << bitnr);
3089 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3090 if (op) {
3091 fp = op->opaque;
3092 assert(fp->ptr == field);
3093 fp->mask |= mask;
3094 } else {
3095 fp = g_new0(BitProperty, 1);
3096 fp->ptr = field;
3097 fp->mask = mask;
3098 object_property_add(OBJECT(cpu), prop_name, "bool",
3099 x86_cpu_get_bit_prop,
3100 x86_cpu_set_bit_prop,
3101 x86_cpu_release_bit_prop, fp, &error_abort);
3105 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3106 FeatureWord w,
3107 int bitnr)
3109 Object *obj = OBJECT(cpu);
3110 int i;
3111 char **names;
3112 FeatureWordInfo *fi = &feature_word_info[w];
3114 if (!fi->feat_names) {
3115 return;
3117 if (!fi->feat_names[bitnr]) {
3118 return;
3121 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3123 feat2prop(names[0]);
3124 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3126 for (i = 1; names[i]; i++) {
3127 feat2prop(names[i]);
3128 object_property_add_alias(obj, names[i], obj, names[0],
3129 &error_abort);
3132 g_strfreev(names);
/* Instance init for X86CPU: register all QOM properties (version fields,
 * vendor/model strings, feature-word introspection, per-feature-bit flags)
 * and load the model definition attached to this subclass.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "apic-id", "int",
                        x86_cpuid_get_apic_id,
                        x86_cpuid_set_apic_id, NULL, NULL, NULL);
    /* Read-only introspection of the active/filtered feature words */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

#ifndef CONFIG_USER_ONLY
    /* Any code creating new X86CPU objects have to set apic-id explicitly */
    cpu->apic_id = -1;
#endif

    /* One boolean property per named feature bit */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3191 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3193 X86CPU *cpu = X86_CPU(cs);
3195 return cpu->apic_id;
3198 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3200 X86CPU *cpu = X86_CPU(cs);
3202 return cpu->env.cr[0] & CR0_PG_MASK;
3205 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3207 X86CPU *cpu = X86_CPU(cs);
3209 cpu->env.eip = value;
3212 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3214 X86CPU *cpu = X86_CPU(cs);
3216 cpu->env.eip = tb->pc - tb->cs_base;
3219 static bool x86_cpu_has_work(CPUState *cs)
3221 X86CPU *cpu = X86_CPU(cs);
3222 CPUX86State *env = &cpu->env;
3224 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3225 CPU_INTERRUPT_POLL)) &&
3226 (env->eflags & IF_MASK)) ||
3227 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3228 CPU_INTERRUPT_INIT |
3229 CPU_INTERRUPT_SIPI |
3230 CPU_INTERRUPT_MCE)) ||
3231 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3232 !(env->hflags & HF_SMM_MASK));
3235 static Property x86_cpu_properties[] = {
3236 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3237 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3238 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3239 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3240 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3241 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3242 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3243 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3244 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3245 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3246 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3247 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3248 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3249 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3250 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3251 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3252 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3253 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3254 DEFINE_PROP_END_OF_LIST()
3257 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3259 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3260 CPUClass *cc = CPU_CLASS(oc);
3261 DeviceClass *dc = DEVICE_CLASS(oc);
3263 xcc->parent_realize = dc->realize;
3264 dc->realize = x86_cpu_realizefn;
3265 dc->props = x86_cpu_properties;
3267 xcc->parent_reset = cc->reset;
3268 cc->reset = x86_cpu_reset;
3269 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3271 cc->class_by_name = x86_cpu_class_by_name;
3272 cc->parse_features = x86_cpu_parse_featurestr;
3273 cc->has_work = x86_cpu_has_work;
3274 cc->do_interrupt = x86_cpu_do_interrupt;
3275 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3276 cc->dump_state = x86_cpu_dump_state;
3277 cc->set_pc = x86_cpu_set_pc;
3278 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3279 cc->gdb_read_register = x86_cpu_gdb_read_register;
3280 cc->gdb_write_register = x86_cpu_gdb_write_register;
3281 cc->get_arch_id = x86_cpu_get_arch_id;
3282 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3283 #ifdef CONFIG_USER_ONLY
3284 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3285 #else
3286 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3287 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3288 cc->write_elf64_note = x86_cpu_write_elf64_note;
3289 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3290 cc->write_elf32_note = x86_cpu_write_elf32_note;
3291 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3292 cc->vmsd = &vmstate_x86_cpu;
3293 #endif
3294 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3295 #ifndef CONFIG_USER_ONLY
3296 cc->debug_excp_handler = breakpoint_handler;
3297 #endif
3298 cc->cpu_exec_enter = x86_cpu_exec_enter;
3299 cc->cpu_exec_exit = x86_cpu_exec_exit;
3302 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3303 * object in cpus -> dangling pointer after final object_unref().
3305 dc->cannot_destroy_with_object_finalize_yet = true;
3308 static const TypeInfo x86_cpu_type_info = {
3309 .name = TYPE_X86_CPU,
3310 .parent = TYPE_CPU,
3311 .instance_size = sizeof(X86CPU),
3312 .instance_init = x86_cpu_initfn,
3313 .abstract = true,
3314 .class_size = sizeof(X86CPUClass),
3315 .class_init = x86_cpu_common_class_init,
3318 static void x86_cpu_register_types(void)
3320 int i;
3322 type_register_static(&x86_cpu_type_info);
3323 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3324 x86_register_cpudef_type(&builtin_x86_defs[i]);
3326 #ifdef CONFIG_KVM
3327 type_register_static(&host_x86_cpu_type_info);
3328 #endif
3331 type_init(x86_cpu_register_types)