target-i386: Implement CPUID[0xB] (Extended Topology Enumeration)
[qemu/ar7.git] / target-i386 / cpu.c
blobf3f95cd0c4d2969423a79b5c57dade887a2ca907
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */
/* One-byte descriptor values reported in CPUID[2] for the caches below: */
#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B 0x7d

/* CPUID Leaf 4 constants: */

/* EAX: */
/* Cache type values placed in the low bits of EAX: */
#define CPUID_4_TYPE_DCACHE 1
#define CPUID_4_TYPE_ICACHE 2
#define CPUID_4_TYPE_UNIFIED 3
/* Cache level is shifted left by 5, i.e. placed above the type field */
#define CPUID_4_LEVEL(l) ((l) << 5)
#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE (1 << 1)
#define CPUID_4_COMPLEX_IDX (1 << 2)
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a cache associativity (number of ways) to the 4-bit field
 * defined by AMD; values with no defined encoding map to 0.
 * NOTE: the argument is evaluated multiple times, so pass only
 * side-effect-free expressions.  Each use of the argument is
 * parenthesized so that compound expressions (ternaries, etc.)
 * expand correctly.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a) : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE 64
#define L1D_ASSOCIATIVITY 8
#define L1D_SETS 64
#define L1D_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
/* AMD-specific values (the FIXME above refers to leaf 0x80000005): */
#define L1D_LINES_PER_TAG 1
#define L1D_SIZE_KB_AMD 64
#define L1D_ASSOCIATIVITY_AMD 2

/* L1 instruction cache: */
#define L1I_LINE_SIZE 64
#define L1I_ASSOCIATIVITY 8
#define L1I_SETS 64
#define L1I_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG 1
#define L1I_SIZE_KB_AMD 64
#define L1I_ASSOCIATIVITY_AMD 2

/* Level 2 unified cache: */
#define L2_LINE_SIZE 64
#define L2_ASSOCIATIVITY 16
#define L2_SETS 4096
#define L2_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG 1
#define L2_SIZE_KB_AMD 512

/* No L3 cache: */
#define L3_SIZE_KB 0 /* disabled */
#define L3_ASSOCIATIVITY 0 /* disabled */
#define L3_LINES_PER_TAG 0 /* disabled */
#define L3_LINE_SIZE 0 /* disabled */

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC 1
#define L1_DTLB_2M_ENTRIES 255
#define L1_DTLB_4K_ASSOC 1
#define L1_DTLB_4K_ENTRIES 255

#define L1_ITLB_2M_ASSOC 1
#define L1_ITLB_2M_ENTRIES 255
#define L1_ITLB_4K_ASSOC 1
#define L1_ITLB_4K_ENTRIES 255

#define L2_DTLB_2M_ASSOC 0 /* disabled */
#define L2_DTLB_2M_ENTRIES 0 /* disabled */
#define L2_DTLB_4K_ASSOC 4
#define L2_DTLB_4K_ENTRIES 512

#define L2_ITLB_2M_ASSOC 0 /* disabled */
#define L2_ITLB_2M_ENTRIES 0 /* disabled */
#define L2_ITLB_4K_ASSOC 4
#define L2_ITLB_4K_ENTRIES 512
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
173 dst[CPUID_VENDOR_SZ] = '\0';
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
/* Names for CPUID[1].EDX bits: array index == bit number, NULL means the
 * bit has no user-settable flag name here. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
/* Names for CPUID[1].ECX bits; alternative spellings of the same flag are
 * separated by '|' (handled by altcmp()). */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
/* Names for CPUID[0x80000001].EDX bits. */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
/* Names for CPUID[0x80000001].ECX bits (AMD extended features). */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
/* Names for CPUID[0xC0000001].EDX bits (VIA/Centaur features). */
static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
/* Names for CPUID[KVM_CPUID_FEATURES].EAX bits (KVM paravirt features).
 * NOTE(review): bits 0 and 3 are both named "kvmclock" -- presumably the
 * two KVM clocksource feature bits share a single user-visible flag;
 * confirm against linux/kvm_para.h. */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
/* Names for CPUID[0x8000000A].EDX bits (SVM features). */
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
/* Names for CPUID[EAX=7,ECX=0].EBX bits. */
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
    "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
    "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
/* Names for CPUID[EAX=7,ECX=0].ECX bits. */
static const char *cpuid_7_0_ecx_feature_name[] = {
    NULL, NULL, NULL, "pku",
    "ospke", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
/* Names for CPUID[0x80000007].EDX bits (APM features);
 * "invtsc" is flagged unmigratable in feature_word_info. */
static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
/* Names for CPUID[EAX=0xD,ECX=1].EAX bits (XSAVE sub-features). */
static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
/* Names for CPUID[6].EAX bits (thermal/power management). */
static const char *cpuid_6_feature_name[] = {
    NULL, NULL, "arat", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
/* Feature-flag sets for the historical CPU models below, plus the
 * TCG_* masks of flags the TCG emulator is able to provide: */

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
/* partly implemented:
   CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
/* missing:
   CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
/* missing:
   CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
   CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
   CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
   CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
   CPUID_EXT_F16C, CPUID_EXT_RDRAND */

/* 64-bit-only flags are advertised only on TARGET_X86_64 builds: */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
/* missing:
   CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
   CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
   CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
/* missing:
   CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Per-FeatureWord metadata: which CPUID leaf/register carries the word,
 * the names of its bits, and which bits TCG supports or cannot migrate. */
typedef struct FeatureWordInfo {
    /* Bit-indexed array of 32 feature names; NULL entries are unnamed bits */
    const char **feat_names;
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
/* Table mapping each FeatureWord to the CPUID leaf/register it comes
 * from, its per-bit names, and its TCG/migratability masks. */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    [FEAT_1_ECX] = {
        .feat_names = ext_feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    [FEAT_8000_0001_EDX] = {
        .feat_names = ext2_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    [FEAT_8000_0001_ECX] = {
        .feat_names = ext3_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    [FEAT_C000_0001_EDX] = {
        .feat_names = ext4_feature_name,
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    [FEAT_KVM] = {
        .feat_names = kvm_feature_name,
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    [FEAT_SVM] = {
        .feat_names = svm_feature_name,
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    [FEAT_7_0_EBX] = {
        .feat_names = cpuid_7_0_ebx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    [FEAT_7_0_ECX] = {
        .feat_names = cpuid_7_0_ecx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    [FEAT_8000_0007_EDX] = {
        .feat_names = cpuid_apm_edx_feature_name,
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    [FEAT_XSAVE] = {
        .feat_names = cpuid_xsave_feature_name,
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    [FEAT_6_EAX] = {
        .feat_names = cpuid_6_feature_name,
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* Mapping from R_* register indexes to register name and QAPI enum value */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
#undef REGISTER
/* Layout of the extended (XSAVE) state components inside X86XSaveArea,
 * indexed by XSTATE component bit.  Each entry records the CPUID feature
 * flag that guards the component plus its offset and size. */
const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
506 const char *get_register_name_32(unsigned int reg)
508 if (reg >= CPU_NB_REGS32) {
509 return NULL;
511 return x86_reg_info_32[reg].name;
515 * Returns the set of feature flags that are supported and migratable by
516 * QEMU, for a given FeatureWord.
518 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
520 FeatureWordInfo *wi = &feature_word_info[w];
521 uint32_t r = 0;
522 int i;
524 for (i = 0; i < 32; i++) {
525 uint32_t f = 1U << i;
526 /* If the feature name is unknown, it is not supported by QEMU yet */
527 if (!wi->feat_names[i]) {
528 continue;
530 /* Skip features known to QEMU, but explicitly marked as unmigratable */
531 if (wi->unmigratable_flags & f) {
532 continue;
534 r |= f;
536 return r;
/* Execute the CPUID instruction on the *host* CPU with the given
 * function (EAX) and count (ECX) inputs, storing the resulting register
 * values through the non-NULL output pointers.  Aborts when built for a
 * non-x86 host. */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
    uint32_t vec[4];
#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* On i386 all GPRs are saved/restored around CPUID with pusha/popa and
     * the results stored through %esi -- presumably so %ebx is preserved
     * for PIC code; NOTE(review): confirm. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif
    /* NULL output pointers mean the caller does not want that register */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
573 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
/* General substring compare of *[s1..e1) and *[s2..e2).  sx is the start
 * of a substring; ex, if non-NULL, points to the first char after the
 * substring, otherwise the string is assumed to be terminated by a nul.
 * Returns 0 when the two substrings are equal; otherwise a non-zero
 * value (0 - see callers - is the only result they rely on).
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    while (*s1 && *s2 && *s1 == *s2) {
        ++s1;
        ++s2;
        if (s1 == e1 && s2 == e2) {
            /* both substrings ended at the same point: equal */
            return 0;
        }
        if (s1 == e1) {
            /* s1 exhausted first; non-zero unless *s2 is the terminator */
            return *s2;
        }
        if (s2 == e2) {
            /* s2 exhausted first; non-zero unless *s1 is the terminator */
            return *s1;
        }
    }
    /* hit a nul or a mismatching character */
    return *s1 - *s2;
}
/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right. Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
    const char *p, *q;
    /* q marks the start of the current alternative; p scans to its end */
    for (q = p = altstr; ; ) {
        while (*p && *p != '|')
            ++p;
        /* an empty alternative (q == p) matches only an empty candidate;
         * otherwise compare the candidate against [q..p) */
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
            return (0);
        if (!*p)
            return (1);  /* no more alternatives: no match */
        else
            q = ++p;     /* step past the '|' to the next alternative */
/* Search featureset for the flag name *[s..e); if found, set the
 * corresponding bit(s) in *pval and return true, otherwise return false.
 * All 32 entries are scanned, so a name appearing at several bit
 * positions sets every matching bit.
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    bool matched = false;
    int bit;

    for (bit = 0; bit < 32; bit++) {
        const char *name = featureset[bit];
        if (name && !altcmp(s, e, name)) {
            *pval |= 1U << bit;
            matched = true;
        }
    }
    return matched;
}
636 static void add_flagname_to_bitmaps(const char *flagname,
637 FeatureWordArray words,
638 Error **errp)
640 FeatureWord w;
641 for (w = 0; w < FEATURE_WORDS; w++) {
642 FeatureWordInfo *wi = &feature_word_info[w];
643 if (wi->feat_names &&
644 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
645 break;
648 if (w == FEATURE_WORDS) {
649 error_setg(errp, "CPU feature %s not found", flagname);
/* CPU class name definitions: */

/* Build the QOM typename for a CPU model by appending "-" TYPE_X86_CPU
 * to the model name (both macros expect string literals). */
#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
/* Return the QOM type name for a given CPU model name.
 * Caller is responsible for freeing the returned string with g_free().
 */
static char *x86_cpu_type_name(const char *model_name)
{
    char *typename = g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);

    return typename;
}
666 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
668 ObjectClass *oc;
669 char *typename;
671 if (cpu_model == NULL) {
672 return NULL;
675 typename = x86_cpu_type_name(cpu_model);
676 oc = object_class_by_name(typename);
677 g_free(typename);
678 return oc;
/* Static definition of one built-in CPU model (see builtin_x86_defs). */
struct X86CPUDefinition {
    const char *name;   /* CPU model name, e.g. "qemu64" */
    uint32_t level;     /* highest basic CPUID leaf */
    uint32_t xlevel;    /* highest extended CPUID leaf (0x8000xxxx) */
    /* highest Centaur CPUID leaf (0xC000xxxx) -- presumably; see
     * FEAT_C000_0001_EDX above */
    uint32_t xlevel2;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features; /* initial feature-word values */
    char model_id[48];         /* model name string for CPUID 0x80000002-4 */
695 static X86CPUDefinition builtin_x86_defs[] = {
697 .name = "qemu64",
698 .level = 0xd,
699 .vendor = CPUID_VENDOR_AMD,
700 .family = 6,
701 .model = 6,
702 .stepping = 3,
703 .features[FEAT_1_EDX] =
704 PPRO_FEATURES |
705 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
706 CPUID_PSE36,
707 .features[FEAT_1_ECX] =
708 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
709 .features[FEAT_8000_0001_EDX] =
710 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
711 .features[FEAT_8000_0001_ECX] =
712 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
713 .xlevel = 0x8000000A,
714 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
717 .name = "phenom",
718 .level = 5,
719 .vendor = CPUID_VENDOR_AMD,
720 .family = 16,
721 .model = 2,
722 .stepping = 3,
723 /* Missing: CPUID_HT */
724 .features[FEAT_1_EDX] =
725 PPRO_FEATURES |
726 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
727 CPUID_PSE36 | CPUID_VME,
728 .features[FEAT_1_ECX] =
729 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
730 CPUID_EXT_POPCNT,
731 .features[FEAT_8000_0001_EDX] =
732 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
733 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
734 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
735 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
736 CPUID_EXT3_CR8LEG,
737 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
738 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
739 .features[FEAT_8000_0001_ECX] =
740 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
741 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
742 /* Missing: CPUID_SVM_LBRV */
743 .features[FEAT_SVM] =
744 CPUID_SVM_NPT,
745 .xlevel = 0x8000001A,
746 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
749 .name = "core2duo",
750 .level = 10,
751 .vendor = CPUID_VENDOR_INTEL,
752 .family = 6,
753 .model = 15,
754 .stepping = 11,
755 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
756 .features[FEAT_1_EDX] =
757 PPRO_FEATURES |
758 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
759 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
760 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
761 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
762 .features[FEAT_1_ECX] =
763 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
764 CPUID_EXT_CX16,
765 .features[FEAT_8000_0001_EDX] =
766 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
767 .features[FEAT_8000_0001_ECX] =
768 CPUID_EXT3_LAHF_LM,
769 .xlevel = 0x80000008,
770 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
773 .name = "kvm64",
774 .level = 0xd,
775 .vendor = CPUID_VENDOR_INTEL,
776 .family = 15,
777 .model = 6,
778 .stepping = 1,
779 /* Missing: CPUID_HT */
780 .features[FEAT_1_EDX] =
781 PPRO_FEATURES | CPUID_VME |
782 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
783 CPUID_PSE36,
784 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
785 .features[FEAT_1_ECX] =
786 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
787 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
788 .features[FEAT_8000_0001_EDX] =
789 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
790 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
791 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
792 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
793 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
794 .features[FEAT_8000_0001_ECX] =
796 .xlevel = 0x80000008,
797 .model_id = "Common KVM processor"
800 .name = "qemu32",
801 .level = 4,
802 .vendor = CPUID_VENDOR_INTEL,
803 .family = 6,
804 .model = 6,
805 .stepping = 3,
806 .features[FEAT_1_EDX] =
807 PPRO_FEATURES,
808 .features[FEAT_1_ECX] =
809 CPUID_EXT_SSE3,
810 .xlevel = 0x80000004,
811 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
814 .name = "kvm32",
815 .level = 5,
816 .vendor = CPUID_VENDOR_INTEL,
817 .family = 15,
818 .model = 6,
819 .stepping = 1,
820 .features[FEAT_1_EDX] =
821 PPRO_FEATURES | CPUID_VME |
822 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
823 .features[FEAT_1_ECX] =
824 CPUID_EXT_SSE3,
825 .features[FEAT_8000_0001_ECX] =
827 .xlevel = 0x80000008,
828 .model_id = "Common 32-bit KVM processor"
831 .name = "coreduo",
832 .level = 10,
833 .vendor = CPUID_VENDOR_INTEL,
834 .family = 6,
835 .model = 14,
836 .stepping = 8,
837 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
838 .features[FEAT_1_EDX] =
839 PPRO_FEATURES | CPUID_VME |
840 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
841 CPUID_SS,
842 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
843 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
844 .features[FEAT_1_ECX] =
845 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
846 .features[FEAT_8000_0001_EDX] =
847 CPUID_EXT2_NX,
848 .xlevel = 0x80000008,
849 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
852 .name = "486",
853 .level = 1,
854 .vendor = CPUID_VENDOR_INTEL,
855 .family = 4,
856 .model = 8,
857 .stepping = 0,
858 .features[FEAT_1_EDX] =
859 I486_FEATURES,
860 .xlevel = 0,
863 .name = "pentium",
864 .level = 1,
865 .vendor = CPUID_VENDOR_INTEL,
866 .family = 5,
867 .model = 4,
868 .stepping = 3,
869 .features[FEAT_1_EDX] =
870 PENTIUM_FEATURES,
871 .xlevel = 0,
874 .name = "pentium2",
875 .level = 2,
876 .vendor = CPUID_VENDOR_INTEL,
877 .family = 6,
878 .model = 5,
879 .stepping = 2,
880 .features[FEAT_1_EDX] =
881 PENTIUM2_FEATURES,
882 .xlevel = 0,
885 .name = "pentium3",
886 .level = 3,
887 .vendor = CPUID_VENDOR_INTEL,
888 .family = 6,
889 .model = 7,
890 .stepping = 3,
891 .features[FEAT_1_EDX] =
892 PENTIUM3_FEATURES,
893 .xlevel = 0,
896 .name = "athlon",
897 .level = 2,
898 .vendor = CPUID_VENDOR_AMD,
899 .family = 6,
900 .model = 2,
901 .stepping = 3,
902 .features[FEAT_1_EDX] =
903 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
904 CPUID_MCA,
905 .features[FEAT_8000_0001_EDX] =
906 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
907 .xlevel = 0x80000008,
908 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
911 .name = "n270",
912 .level = 10,
913 .vendor = CPUID_VENDOR_INTEL,
914 .family = 6,
915 .model = 28,
916 .stepping = 2,
917 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
918 .features[FEAT_1_EDX] =
919 PPRO_FEATURES |
920 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
921 CPUID_ACPI | CPUID_SS,
922 /* Some CPUs got no CPUID_SEP */
923 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
924 * CPUID_EXT_XTPR */
925 .features[FEAT_1_ECX] =
926 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
927 CPUID_EXT_MOVBE,
928 .features[FEAT_8000_0001_EDX] =
929 CPUID_EXT2_NX,
930 .features[FEAT_8000_0001_ECX] =
931 CPUID_EXT3_LAHF_LM,
932 .xlevel = 0x80000008,
933 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
936 .name = "Conroe",
937 .level = 10,
938 .vendor = CPUID_VENDOR_INTEL,
939 .family = 6,
940 .model = 15,
941 .stepping = 3,
942 .features[FEAT_1_EDX] =
943 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
944 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
945 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
946 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
947 CPUID_DE | CPUID_FP87,
948 .features[FEAT_1_ECX] =
949 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
950 .features[FEAT_8000_0001_EDX] =
951 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
952 .features[FEAT_8000_0001_ECX] =
953 CPUID_EXT3_LAHF_LM,
954 .xlevel = 0x80000008,
955 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
958 .name = "Penryn",
959 .level = 10,
960 .vendor = CPUID_VENDOR_INTEL,
961 .family = 6,
962 .model = 23,
963 .stepping = 3,
964 .features[FEAT_1_EDX] =
965 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
966 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
967 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
968 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
969 CPUID_DE | CPUID_FP87,
970 .features[FEAT_1_ECX] =
971 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
972 CPUID_EXT_SSE3,
973 .features[FEAT_8000_0001_EDX] =
974 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
975 .features[FEAT_8000_0001_ECX] =
976 CPUID_EXT3_LAHF_LM,
977 .xlevel = 0x80000008,
978 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
981 .name = "Nehalem",
982 .level = 11,
983 .vendor = CPUID_VENDOR_INTEL,
984 .family = 6,
985 .model = 26,
986 .stepping = 3,
987 .features[FEAT_1_EDX] =
988 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
989 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
990 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
991 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
992 CPUID_DE | CPUID_FP87,
993 .features[FEAT_1_ECX] =
994 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
995 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
996 .features[FEAT_8000_0001_EDX] =
997 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
998 .features[FEAT_8000_0001_ECX] =
999 CPUID_EXT3_LAHF_LM,
1000 .xlevel = 0x80000008,
1001 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1004 .name = "Westmere",
1005 .level = 11,
1006 .vendor = CPUID_VENDOR_INTEL,
1007 .family = 6,
1008 .model = 44,
1009 .stepping = 1,
1010 .features[FEAT_1_EDX] =
1011 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1012 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1013 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1014 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1015 CPUID_DE | CPUID_FP87,
1016 .features[FEAT_1_ECX] =
1017 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1018 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1019 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1020 .features[FEAT_8000_0001_EDX] =
1021 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1022 .features[FEAT_8000_0001_ECX] =
1023 CPUID_EXT3_LAHF_LM,
1024 .features[FEAT_6_EAX] =
1025 CPUID_6_EAX_ARAT,
1026 .xlevel = 0x80000008,
1027 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1030 .name = "SandyBridge",
1031 .level = 0xd,
1032 .vendor = CPUID_VENDOR_INTEL,
1033 .family = 6,
1034 .model = 42,
1035 .stepping = 1,
1036 .features[FEAT_1_EDX] =
1037 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1038 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1039 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1040 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1041 CPUID_DE | CPUID_FP87,
1042 .features[FEAT_1_ECX] =
1043 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1044 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1045 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1046 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1047 CPUID_EXT_SSE3,
1048 .features[FEAT_8000_0001_EDX] =
1049 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1050 CPUID_EXT2_SYSCALL,
1051 .features[FEAT_8000_0001_ECX] =
1052 CPUID_EXT3_LAHF_LM,
1053 .features[FEAT_XSAVE] =
1054 CPUID_XSAVE_XSAVEOPT,
1055 .features[FEAT_6_EAX] =
1056 CPUID_6_EAX_ARAT,
1057 .xlevel = 0x80000008,
1058 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1061 .name = "IvyBridge",
1062 .level = 0xd,
1063 .vendor = CPUID_VENDOR_INTEL,
1064 .family = 6,
1065 .model = 58,
1066 .stepping = 9,
1067 .features[FEAT_1_EDX] =
1068 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1069 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1070 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1071 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1072 CPUID_DE | CPUID_FP87,
1073 .features[FEAT_1_ECX] =
1074 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1075 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1076 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1077 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1078 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1079 .features[FEAT_7_0_EBX] =
1080 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1081 CPUID_7_0_EBX_ERMS,
1082 .features[FEAT_8000_0001_EDX] =
1083 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1084 CPUID_EXT2_SYSCALL,
1085 .features[FEAT_8000_0001_ECX] =
1086 CPUID_EXT3_LAHF_LM,
1087 .features[FEAT_XSAVE] =
1088 CPUID_XSAVE_XSAVEOPT,
1089 .features[FEAT_6_EAX] =
1090 CPUID_6_EAX_ARAT,
1091 .xlevel = 0x80000008,
1092 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1095 .name = "Haswell-noTSX",
1096 .level = 0xd,
1097 .vendor = CPUID_VENDOR_INTEL,
1098 .family = 6,
1099 .model = 60,
1100 .stepping = 1,
1101 .features[FEAT_1_EDX] =
1102 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1103 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1104 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1105 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1106 CPUID_DE | CPUID_FP87,
1107 .features[FEAT_1_ECX] =
1108 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1109 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1110 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1111 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1112 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1113 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1114 .features[FEAT_8000_0001_EDX] =
1115 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1116 CPUID_EXT2_SYSCALL,
1117 .features[FEAT_8000_0001_ECX] =
1118 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1119 .features[FEAT_7_0_EBX] =
1120 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1121 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1122 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1123 .features[FEAT_XSAVE] =
1124 CPUID_XSAVE_XSAVEOPT,
1125 .features[FEAT_6_EAX] =
1126 CPUID_6_EAX_ARAT,
1127 .xlevel = 0x80000008,
1128 .model_id = "Intel Core Processor (Haswell, no TSX)",
1129 }, {
1130 .name = "Haswell",
1131 .level = 0xd,
1132 .vendor = CPUID_VENDOR_INTEL,
1133 .family = 6,
1134 .model = 60,
1135 .stepping = 1,
1136 .features[FEAT_1_EDX] =
1137 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1138 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1139 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1140 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1141 CPUID_DE | CPUID_FP87,
1142 .features[FEAT_1_ECX] =
1143 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1144 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1145 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1146 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1147 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1148 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1149 .features[FEAT_8000_0001_EDX] =
1150 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1151 CPUID_EXT2_SYSCALL,
1152 .features[FEAT_8000_0001_ECX] =
1153 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1154 .features[FEAT_7_0_EBX] =
1155 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1156 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1157 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1158 CPUID_7_0_EBX_RTM,
1159 .features[FEAT_XSAVE] =
1160 CPUID_XSAVE_XSAVEOPT,
1161 .features[FEAT_6_EAX] =
1162 CPUID_6_EAX_ARAT,
1163 .xlevel = 0x80000008,
1164 .model_id = "Intel Core Processor (Haswell)",
1167 .name = "Broadwell-noTSX",
1168 .level = 0xd,
1169 .vendor = CPUID_VENDOR_INTEL,
1170 .family = 6,
1171 .model = 61,
1172 .stepping = 2,
1173 .features[FEAT_1_EDX] =
1174 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1175 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1176 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1177 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1178 CPUID_DE | CPUID_FP87,
1179 .features[FEAT_1_ECX] =
1180 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1181 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1182 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1183 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1184 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1185 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1186 .features[FEAT_8000_0001_EDX] =
1187 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1188 CPUID_EXT2_SYSCALL,
1189 .features[FEAT_8000_0001_ECX] =
1190 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1191 .features[FEAT_7_0_EBX] =
1192 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1193 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1194 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1195 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1196 CPUID_7_0_EBX_SMAP,
1197 .features[FEAT_XSAVE] =
1198 CPUID_XSAVE_XSAVEOPT,
1199 .features[FEAT_6_EAX] =
1200 CPUID_6_EAX_ARAT,
1201 .xlevel = 0x80000008,
1202 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1205 .name = "Broadwell",
1206 .level = 0xd,
1207 .vendor = CPUID_VENDOR_INTEL,
1208 .family = 6,
1209 .model = 61,
1210 .stepping = 2,
1211 .features[FEAT_1_EDX] =
1212 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1213 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1214 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1215 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1216 CPUID_DE | CPUID_FP87,
1217 .features[FEAT_1_ECX] =
1218 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1219 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1220 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1221 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1222 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1223 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1224 .features[FEAT_8000_0001_EDX] =
1225 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1226 CPUID_EXT2_SYSCALL,
1227 .features[FEAT_8000_0001_ECX] =
1228 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1229 .features[FEAT_7_0_EBX] =
1230 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1231 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1232 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1233 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1234 CPUID_7_0_EBX_SMAP,
1235 .features[FEAT_XSAVE] =
1236 CPUID_XSAVE_XSAVEOPT,
1237 .features[FEAT_6_EAX] =
1238 CPUID_6_EAX_ARAT,
1239 .xlevel = 0x80000008,
1240 .model_id = "Intel Core Processor (Broadwell)",
1243 .name = "Skylake-Client",
1244 .level = 0xd,
1245 .vendor = CPUID_VENDOR_INTEL,
1246 .family = 6,
1247 .model = 94,
1248 .stepping = 3,
1249 .features[FEAT_1_EDX] =
1250 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1251 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1252 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1253 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1254 CPUID_DE | CPUID_FP87,
1255 .features[FEAT_1_ECX] =
1256 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1257 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1258 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1259 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1260 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1261 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1262 .features[FEAT_8000_0001_EDX] =
1263 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1264 CPUID_EXT2_SYSCALL,
1265 .features[FEAT_8000_0001_ECX] =
1266 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1267 .features[FEAT_7_0_EBX] =
1268 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1269 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1270 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1271 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1272 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1273 /* Missing: XSAVES (not supported by some Linux versions,
1274 * including v4.1 to v4.6).
1275 * KVM doesn't yet expose any XSAVES state save component,
1276 * and the only one defined in Skylake (processor tracing)
1277 * probably will block migration anyway.
1279 .features[FEAT_XSAVE] =
1280 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1281 CPUID_XSAVE_XGETBV1,
1282 .features[FEAT_6_EAX] =
1283 CPUID_6_EAX_ARAT,
1284 .xlevel = 0x80000008,
1285 .model_id = "Intel Core Processor (Skylake)",
1288 .name = "Opteron_G1",
1289 .level = 5,
1290 .vendor = CPUID_VENDOR_AMD,
1291 .family = 15,
1292 .model = 6,
1293 .stepping = 1,
1294 .features[FEAT_1_EDX] =
1295 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1296 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1297 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1298 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1299 CPUID_DE | CPUID_FP87,
1300 .features[FEAT_1_ECX] =
1301 CPUID_EXT_SSE3,
1302 .features[FEAT_8000_0001_EDX] =
1303 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1304 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1305 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1306 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1307 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1308 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1309 .xlevel = 0x80000008,
1310 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1313 .name = "Opteron_G2",
1314 .level = 5,
1315 .vendor = CPUID_VENDOR_AMD,
1316 .family = 15,
1317 .model = 6,
1318 .stepping = 1,
1319 .features[FEAT_1_EDX] =
1320 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1321 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1322 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1323 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1324 CPUID_DE | CPUID_FP87,
1325 .features[FEAT_1_ECX] =
1326 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1327 /* Missing: CPUID_EXT2_RDTSCP */
1328 .features[FEAT_8000_0001_EDX] =
1329 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1330 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1331 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1332 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1333 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1334 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1335 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1336 .features[FEAT_8000_0001_ECX] =
1337 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1338 .xlevel = 0x80000008,
1339 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1342 .name = "Opteron_G3",
1343 .level = 5,
1344 .vendor = CPUID_VENDOR_AMD,
1345 .family = 15,
1346 .model = 6,
1347 .stepping = 1,
1348 .features[FEAT_1_EDX] =
1349 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1350 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1351 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1352 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1353 CPUID_DE | CPUID_FP87,
1354 .features[FEAT_1_ECX] =
1355 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1356 CPUID_EXT_SSE3,
1357 /* Missing: CPUID_EXT2_RDTSCP */
1358 .features[FEAT_8000_0001_EDX] =
1359 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1360 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1361 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1362 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1363 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1364 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1365 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1366 .features[FEAT_8000_0001_ECX] =
1367 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1368 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1369 .xlevel = 0x80000008,
1370 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1373 .name = "Opteron_G4",
1374 .level = 0xd,
1375 .vendor = CPUID_VENDOR_AMD,
1376 .family = 21,
1377 .model = 1,
1378 .stepping = 2,
1379 .features[FEAT_1_EDX] =
1380 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1381 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1382 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1383 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1384 CPUID_DE | CPUID_FP87,
1385 .features[FEAT_1_ECX] =
1386 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1387 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1388 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1389 CPUID_EXT_SSE3,
1390 /* Missing: CPUID_EXT2_RDTSCP */
1391 .features[FEAT_8000_0001_EDX] =
1392 CPUID_EXT2_LM |
1393 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1394 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1395 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1396 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1397 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1398 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1399 .features[FEAT_8000_0001_ECX] =
1400 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1401 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1402 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1403 CPUID_EXT3_LAHF_LM,
1404 /* no xsaveopt! */
1405 .xlevel = 0x8000001A,
1406 .model_id = "AMD Opteron 62xx class CPU",
1409 .name = "Opteron_G5",
1410 .level = 0xd,
1411 .vendor = CPUID_VENDOR_AMD,
1412 .family = 21,
1413 .model = 2,
1414 .stepping = 0,
1415 .features[FEAT_1_EDX] =
1416 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1417 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1418 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1419 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1420 CPUID_DE | CPUID_FP87,
1421 .features[FEAT_1_ECX] =
1422 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1423 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1424 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1425 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1426 /* Missing: CPUID_EXT2_RDTSCP */
1427 .features[FEAT_8000_0001_EDX] =
1428 CPUID_EXT2_LM |
1429 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1430 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1431 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1432 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1433 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1434 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1435 .features[FEAT_8000_0001_ECX] =
1436 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1437 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1438 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1439 CPUID_EXT3_LAHF_LM,
1440 /* no xsaveopt! */
1441 .xlevel = 0x8000001A,
1442 .model_id = "AMD Opteron 63xx class CPU",
/* Generic (property name, value) pair, used for tables of default
 * property settings.  Tables are terminated by an entry with prop == NULL. */
1446 typedef struct PropValue {
1447 const char *prop, *value;
1448 } PropValue;
1450 /* KVM-specific features that are automatically added/removed
1451 * from all CPU models when KVM is enabled.
1453 static PropValue kvm_default_props[] = {
1454 { "kvmclock", "on" },
1455 { "kvm-nopiodelay", "on" },
1456 { "kvm-asyncpf", "on" },
1457 { "kvm-steal-time", "on" },
1458 { "kvm-pv-eoi", "on" },
1459 { "kvmclock-stable-bit", "on" },
1460 { "x2apic", "on" },
/* Features disabled by default under KVM */
1461 { "acpi", "off" },
1462 { "monitor", "off" },
1463 { "svm", "off" },
/* Terminator: iteration over this table stops at prop == NULL */
1464 { NULL, NULL },
1467 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1469 PropValue *pv;
1470 for (pv = kvm_default_props; pv->prop; pv++) {
1471 if (!strcmp(pv->prop, prop)) {
1472 pv->value = value;
1473 break;
1477 /* It is valid to call this function only for properties that
1478 * are already present in the kvm_default_props table.
1480 assert(pv->prop);
/* Forward declaration; the definition appears later in this file. */
1483 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1484 bool migratable_only);
/* Everything up to the matching #endif is only built with KVM support. */
1486 #ifdef CONFIG_KVM
/* Fill str with the host CPU's 48-byte brand string.
 *
 * The brand string is returned in EAX..EDX of CPUID leaves
 * 0x80000002..0x80000004, 16 bytes per leaf; str must have room
 * for at least 48 bytes.  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        uint32_t regs[4] = { 0, 0, 0, 0 };

        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* regs[] holds EAX, EBX, ECX, EDX contiguously, which is
         * exactly the byte order of the brand string. */
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
/* Filled in by host_x86_cpu_class_init() from the host's CPUID values. */
1503 static X86CPUDefinition host_cpudef;
1505 static Property host_x86_cpu_properties[] = {
/* "migratable" defaults to true: leave out features that block migration */
1506 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1507 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1508 DEFINE_PROP_END_OF_LIST()
1511 /* class_init for the "host" CPU model
1513 * This function may be called before KVM is initialized.
1515 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1517 DeviceClass *dc = DEVICE_CLASS(oc);
1518 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1519 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
/* The "host" model is only usable when KVM is enabled */
1521 xcc->kvm_required = true;
/* CPUID leaf 0: the 12-char vendor string lives in EBX, EDX, ECX (in
 * that order) */
1523 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1524 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
/* CPUID leaf 1 EAX: decode family/model/stepping, folding the extended
 * family (bits 27..20) and extended model (bits 19..16) fields in */
1526 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1527 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1528 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1529 host_cpudef.stepping = eax & 0x0F;
1531 cpu_x86_fill_model_id(host_cpudef.model_id);
1533 xcc->cpu_def = &host_cpudef;
1535 /* level, xlevel, xlevel2, and the feature words are initialized on
1536 * instance_init, because they require KVM to be initialized.
1539 dc->props = host_x86_cpu_properties;
1540 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1541 dc->cannot_destroy_with_object_finalize_yet = true;
/* instance_init for the "host" CPU model: query the maximum CPUID levels
 * KVM supports on this host.  Requires KVM to already be initialized. */
1544 static void host_x86_cpu_initfn(Object *obj)
1546 X86CPU *cpu = X86_CPU(obj);
1547 CPUX86State *env = &cpu->env;
1548 KVMState *s = kvm_state;
1550 assert(kvm_enabled());
1552 /* We can't fill the features array here because we don't know yet if
1553 * "migratable" is true or false.
1555 cpu->host_features = true;
/* Maximum basic, extended, and Centaur CPUID leaves reported by KVM */
1557 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1558 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1559 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
/* The PMU is enabled by default for -cpu host */
1561 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
/* QOM type registration for the "host" CPU model */
1564 static const TypeInfo host_x86_cpu_type_info = {
1565 .name = X86_CPU_TYPE_NAME("host"),
1566 .parent = TYPE_X86_CPU,
1567 .instance_init = host_x86_cpu_initfn,
1568 .class_init = host_x86_cpu_class_init,
/* End of the CONFIG_KVM-only section */
1571 #endif
1573 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1575 FeatureWordInfo *f = &feature_word_info[w];
1576 int i;
1578 for (i = 0; i < 32; ++i) {
1579 if ((1UL << i) & mask) {
1580 const char *reg = get_register_name_32(f->cpuid_reg);
1581 assert(reg);
1582 fprintf(stderr, "warning: %s doesn't support requested feature: "
1583 "CPUID.%02XH:%s%s%s [bit %d]\n",
1584 kvm_enabled() ? "host" : "TCG",
1585 f->cpuid_eax, reg,
1586 f->feat_names[i] ? "." : "",
1587 f->feat_names[i] ? f->feat_names[i] : "", i);
1592 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1593 const char *name, void *opaque,
1594 Error **errp)
1596 X86CPU *cpu = X86_CPU(obj);
1597 CPUX86State *env = &cpu->env;
1598 int64_t value;
1600 value = (env->cpuid_version >> 8) & 0xf;
1601 if (value == 0xf) {
1602 value += (env->cpuid_version >> 20) & 0xff;
1604 visit_type_int(v, name, &value, errp);
1607 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1608 const char *name, void *opaque,
1609 Error **errp)
1611 X86CPU *cpu = X86_CPU(obj);
1612 CPUX86State *env = &cpu->env;
1613 const int64_t min = 0;
1614 const int64_t max = 0xff + 0xf;
1615 Error *local_err = NULL;
1616 int64_t value;
1618 visit_type_int(v, name, &value, &local_err);
1619 if (local_err) {
1620 error_propagate(errp, local_err);
1621 return;
1623 if (value < min || value > max) {
1624 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1625 name ? name : "null", value, min, max);
1626 return;
1629 env->cpuid_version &= ~0xff00f00;
1630 if (value > 0x0f) {
1631 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1632 } else {
1633 env->cpuid_version |= value << 8;
1637 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1638 const char *name, void *opaque,
1639 Error **errp)
1641 X86CPU *cpu = X86_CPU(obj);
1642 CPUX86State *env = &cpu->env;
1643 int64_t value;
1645 value = (env->cpuid_version >> 4) & 0xf;
1646 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1647 visit_type_int(v, name, &value, errp);
1650 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1651 const char *name, void *opaque,
1652 Error **errp)
1654 X86CPU *cpu = X86_CPU(obj);
1655 CPUX86State *env = &cpu->env;
1656 const int64_t min = 0;
1657 const int64_t max = 0xff;
1658 Error *local_err = NULL;
1659 int64_t value;
1661 visit_type_int(v, name, &value, &local_err);
1662 if (local_err) {
1663 error_propagate(errp, local_err);
1664 return;
1666 if (value < min || value > max) {
1667 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1668 name ? name : "null", value, min, max);
1669 return;
1672 env->cpuid_version &= ~0xf00f0;
1673 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1676 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1677 const char *name, void *opaque,
1678 Error **errp)
1680 X86CPU *cpu = X86_CPU(obj);
1681 CPUX86State *env = &cpu->env;
1682 int64_t value;
1684 value = env->cpuid_version & 0xf;
1685 visit_type_int(v, name, &value, errp);
1688 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1689 const char *name, void *opaque,
1690 Error **errp)
1692 X86CPU *cpu = X86_CPU(obj);
1693 CPUX86State *env = &cpu->env;
1694 const int64_t min = 0;
1695 const int64_t max = 0xf;
1696 Error *local_err = NULL;
1697 int64_t value;
1699 visit_type_int(v, name, &value, &local_err);
1700 if (local_err) {
1701 error_propagate(errp, local_err);
1702 return;
1704 if (value < min || value > max) {
1705 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1706 name ? name : "null", value, min, max);
1707 return;
1710 env->cpuid_version &= ~0xf;
1711 env->cpuid_version |= value & 0xf;
1714 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1716 X86CPU *cpu = X86_CPU(obj);
1717 CPUX86State *env = &cpu->env;
1718 char *value;
1720 value = g_malloc(CPUID_VENDOR_SZ + 1);
1721 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1722 env->cpuid_vendor3);
1723 return value;
1726 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1727 Error **errp)
1729 X86CPU *cpu = X86_CPU(obj);
1730 CPUX86State *env = &cpu->env;
1731 int i;
1733 if (strlen(value) != CPUID_VENDOR_SZ) {
1734 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1735 return;
1738 env->cpuid_vendor1 = 0;
1739 env->cpuid_vendor2 = 0;
1740 env->cpuid_vendor3 = 0;
1741 for (i = 0; i < 4; i++) {
1742 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1743 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1744 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1748 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1750 X86CPU *cpu = X86_CPU(obj);
1751 CPUX86State *env = &cpu->env;
1752 char *value;
1753 int i;
1755 value = g_malloc(48 + 1);
1756 for (i = 0; i < 48; i++) {
1757 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1759 value[48] = '\0';
1760 return value;
1763 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1764 Error **errp)
1766 X86CPU *cpu = X86_CPU(obj);
1767 CPUX86State *env = &cpu->env;
1768 int c, len, i;
1770 if (model_id == NULL) {
1771 model_id = "";
1773 len = strlen(model_id);
1774 memset(env->cpuid_model, 0, 48);
1775 for (i = 0; i < 48; i++) {
1776 if (i >= len) {
1777 c = '\0';
1778 } else {
1779 c = (uint8_t)model_id[i];
1781 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1785 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1786 void *opaque, Error **errp)
1788 X86CPU *cpu = X86_CPU(obj);
1789 int64_t value;
1791 value = cpu->env.tsc_khz * 1000;
1792 visit_type_int(v, name, &value, errp);
1795 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1796 void *opaque, Error **errp)
1798 X86CPU *cpu = X86_CPU(obj);
1799 const int64_t min = 0;
1800 const int64_t max = INT64_MAX;
1801 Error *local_err = NULL;
1802 int64_t value;
1804 visit_type_int(v, name, &value, &local_err);
1805 if (local_err) {
1806 error_propagate(errp, local_err);
1807 return;
1809 if (value < min || value > max) {
1810 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1811 name ? name : "null", value, min, max);
1812 return;
1815 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1818 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1819 void *opaque, Error **errp)
1821 X86CPU *cpu = X86_CPU(obj);
1822 int64_t value = cpu->apic_id;
1824 visit_type_int(v, name, &value, errp);
1827 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1828 void *opaque, Error **errp)
1830 X86CPU *cpu = X86_CPU(obj);
1831 DeviceState *dev = DEVICE(obj);
1832 const int64_t min = 0;
1833 const int64_t max = UINT32_MAX;
1834 Error *error = NULL;
1835 int64_t value;
1837 if (dev->realized) {
1838 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1839 "it was realized", name, object_get_typename(obj));
1840 return;
1843 visit_type_int(v, name, &value, &error);
1844 if (error) {
1845 error_propagate(errp, error);
1846 return;
1848 if (value < min || value > max) {
1849 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1850 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1851 object_get_typename(obj), name, value, min, max);
1852 return;
1855 if ((value != cpu->apic_id) && cpu_exists(value)) {
1856 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1857 return;
1859 cpu->apic_id = value;
1862 /* Generic getter for "feature-words" and "filtered-features" properties */
1863 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1864 const char *name, void *opaque,
1865 Error **errp)
/* opaque points at the uint32_t feature-word array to expose */
1867 uint32_t *array = (uint32_t *)opaque;
1868 FeatureWord w;
1869 Error *err = NULL;
/* Both arrays live on the stack; the visitor consumes them before return */
1870 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1871 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1872 X86CPUFeatureWordInfoList *list = NULL;
1874 for (w = 0; w < FEATURE_WORDS; w++) {
1875 FeatureWordInfo *wi = &feature_word_info[w];
1876 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1877 qwi->cpuid_input_eax = wi->cpuid_eax;
1878 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1879 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1880 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1881 qwi->features = array[w];
1883 /* List will be in reverse order, but order shouldn't matter */
1884 list_entries[w].next = list;
1885 list_entries[w].value = &word_infos[w];
1886 list = &list_entries[w];
1889 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
1890 error_propagate(errp, err);
1893 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1894 void *opaque, Error **errp)
1896 X86CPU *cpu = X86_CPU(obj);
1897 int64_t value = cpu->hyperv_spinlock_attempts;
1899 visit_type_int(v, name, &value, errp);
1902 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1903 void *opaque, Error **errp)
1905 const int64_t min = 0xFFF;
1906 const int64_t max = UINT_MAX;
1907 X86CPU *cpu = X86_CPU(obj);
1908 Error *err = NULL;
1909 int64_t value;
1911 visit_type_int(v, name, &value, &err);
1912 if (err) {
1913 error_propagate(errp, err);
1914 return;
1917 if (value < min || value > max) {
1918 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1919 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1920 object_get_typename(obj), name ? name : "null",
1921 value, min, max);
1922 return;
1924 cpu->hyperv_spinlock_attempts = value;
/* qdev property type backing the "hv-spinlocks" property */
1927 static PropertyInfo qdev_prop_spinlocks = {
1928 .name = "int",
1929 .get = x86_get_hv_spinlocks,
1930 .set = x86_set_hv_spinlocks,
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 * Modifies s in place.
 */
static inline void feat2prop(char *s)
{
    for (; *s != '\0'; s++) {
        if (*s == '_') {
            *s = '-';
        }
    }
}
1943 /* Parse "+feature,-feature,feature=foo" CPU feature string
/* NOTE(review): strtok() is stateful and destructive: the features string
 * is modified in place and this function is not reentrant. */
1945 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1946 Error **errp)
1948 X86CPU *cpu = X86_CPU(cs);
1949 char *featurestr; /* Single 'key=value" string being parsed */
1950 FeatureWord w;
1951 /* Features to be added */
1952 FeatureWordArray plus_features = { 0 };
1953 /* Features to be removed */
1954 FeatureWordArray minus_features = { 0 };
1955 uint32_t numvalue;
1956 CPUX86State *env = &cpu->env;
1957 Error *local_err = NULL;
1959 featurestr = features ? strtok(features, ",") : NULL;
1961 while (featurestr) {
1962 char *val;
/* "+foo" / "-foo": record in the plus/minus bitmaps, applied below */
1963 if (featurestr[0] == '+') {
1964 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1965 } else if (featurestr[0] == '-') {
1966 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
/* "key=value": split at '=', then dispatch on the key */
1967 } else if ((val = strchr(featurestr, '='))) {
1968 *val = 0; val++;
1969 feat2prop(featurestr);
1970 if (!strcmp(featurestr, "xlevel")) {
1971 char *err;
1972 char num[32];
1974 numvalue = strtoul(val, &err, 0);
1975 if (!*val || *err) {
1976 error_setg(errp, "bad numerical value %s", val);
1977 return;
/* Legacy fixup: xlevel used to be specified without the 0x80000000 bias */
1979 if (numvalue < 0x80000000) {
1980 error_report("xlevel value shall always be >= 0x80000000"
1981 ", fixup will be removed in future versions");
1982 numvalue += 0x80000000;
1984 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1985 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1986 } else if (!strcmp(featurestr, "tsc-freq")) {
1987 int64_t tsc_freq;
1988 char *err;
1989 char num[32];
/* Accept size suffixes (e.g. "2G"), with a decimal multiplier of 1000 */
1991 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1992 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1993 if (tsc_freq < 0 || *err) {
1994 error_setg(errp, "bad numerical value %s", val);
1995 return;
1997 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1998 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1999 &local_err);
2000 } else if (!strcmp(featurestr, "hv-spinlocks")) {
2001 char *err;
2002 const int min = 0xFFF;
2003 char num[32];
2004 numvalue = strtoul(val, &err, 0);
2005 if (!*val || *err) {
2006 error_setg(errp, "bad numerical value %s", val);
2007 return;
/* Legacy fixup: clamp to the minimum retry count instead of failing */
2009 if (numvalue < min) {
2010 error_report("hv-spinlocks value shall always be >= 0x%x"
2011 ", fixup will be removed in future versions",
2012 min);
2013 numvalue = min;
2015 snprintf(num, sizeof(num), "%" PRId32, numvalue);
2016 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
2017 } else {
/* Any other key: forward to the QOM property of the same name */
2018 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
2020 } else {
/* Bare "foo": treat as boolean property set to "on" */
2021 feat2prop(featurestr);
2022 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
2024 if (local_err) {
2025 error_propagate(errp, local_err);
2026 return;
2028 featurestr = strtok(NULL, ",");
/* -cpu host: start from everything the host supports ... */
2031 if (cpu->host_features) {
2032 for (w = 0; w < FEATURE_WORDS; w++) {
2033 env->features[w] =
2034 x86_cpu_get_supported_feature_word(w, cpu->migratable);
/* ... then apply the +/- adjustments collected above */
2038 for (w = 0; w < FEATURE_WORDS; w++) {
2039 env->features[w] |= plus_features[w];
2040 env->features[w] &= ~minus_features[w];
2044 /* Print all cpuid feature names in featureset
2046 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2048 int bit;
2049 bool first = true;
2051 for (bit = 0; bit < 32; bit++) {
2052 if (featureset[bit]) {
2053 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2054 first = false;
/* generate CPU information.
 *
 * Prints every built-in CPU model (name + model-id string), the special
 * "host" model when built with KVM support, and then all recognized CPUID
 * flag names, one feature word per line.
 */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    X86CPUDefinition *def;
    char buf[256];
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        def = &builtin_x86_defs[i];
        snprintf(buf, sizeof(buf), "%s", def->name);
        (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
    }
#ifdef CONFIG_KVM
    (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
                   "KVM processor with all supported host features "
                   "(only available in KVM mode)");
#endif

    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];

        (*cpu_fprintf)(f, " ");
        listflags(f, cpu_fprintf, fw->feat_names);
        (*cpu_fprintf)(f, "\n");
    }
}
/* QMP query-cpu-definitions: return the list of built-in CPU model names.
 *
 * The list is built by prepending, so it comes out in reverse order of
 * builtin_x86_defs[].  Caller owns the returned list and must free it
 * with qapi_free_CpuDefinitionInfoList().
 */
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    X86CPUDefinition *def;
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        CpuDefinitionInfoList *entry;
        CpuDefinitionInfo *info;

        def = &builtin_x86_defs[i];
        info = g_malloc0(sizeof(*info));
        info->name = g_strdup(def->name);

        entry = g_malloc0(sizeof(*entry));
        entry->value = info;
        entry->next = cpu_list;
        cpu_list = entry;
    }

    return cpu_list;
}
/* Return the bits of feature word @w that the current accelerator can
 * actually provide: KVM's supported-CPUID set under KVM, the static TCG
 * feature mask under TCG, and everything (~0) otherwise (e.g. qtest,
 * where no filtering is meaningful).  With @migratable_only set, bits
 * with no known migration support are additionally masked out.
 */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                                    wi->cpuid_ecx,
                                                    wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
/*
 * Filters CPU feature words based on host availability of each feature.
 *
 * Clears unsupported bits from env->features[] and records them in
 * cpu->filtered_features[] so callers (and "-cpu ...,check/enforce")
 * can report exactly which requested flags were dropped.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, cpu->migratable);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            if (cpu->check_cpuid || cpu->enforce_cpuid) {
                report_unavailable_features(w, cpu->filtered_features[w]);
            }
            rv = 1;
        }
    }

    return rv;
}
2159 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2161 PropValue *pv;
2162 for (pv = props; pv->prop; pv++) {
2163 if (!pv->value) {
2164 continue;
2166 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2167 &error_abort);
/* Load data from X86CPUDefinition
 *
 * Copies the model definition @def into @cpu's QOM properties and feature
 * words, then applies accelerator-specific adjustments (KVM defaults,
 * CPUID_EXT_HYPERVISOR, host vendor string under KVM).
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    object_property_set_int(OBJECT(cpu), def->level, "level", errp);
    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            /* x2apic needs the in-kernel irqchip; disable it otherwise. */
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    }

    /* Guests always see the hypervisor bit. */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t  ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
/* Create (but do not realize) an X86CPU from a "-cpu"-style model string
 * of the form "name[,features...]".
 *
 * Returns the new, unrealized CPU object, or NULL on error (error
 * propagated to @errp).  On failure any partially-created CPU is unref'd.
 */
X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
{
    X86CPU *cpu = NULL;
    X86CPUClass *xcc;
    ObjectClass *oc;
    gchar **model_pieces;
    char *name, *features;
    Error *error = NULL;

    /* Split only at the first ',': everything after it is feature string. */
    model_pieces = g_strsplit(cpu_model, ",", 2);
    if (!model_pieces[0]) {
        error_setg(&error, "Invalid/empty CPU model name");
        goto out;
    }
    name = model_pieces[0];
    features = model_pieces[1];

    oc = x86_cpu_class_by_name(name);
    if (oc == NULL) {
        error_setg(&error, "Unable to find CPU definition: %s", name);
        goto out;
    }
    xcc = X86_CPU_CLASS(oc);

    if (xcc->kvm_required && !kvm_enabled()) {
        error_setg(&error, "CPU model '%s' requires KVM", name);
        goto out;
    }

    cpu = X86_CPU(object_new(object_class_get_name(oc)));

    x86_cpu_parse_featurestr(CPU(cpu), features, &error);
    if (error) {
        goto out;
    }

out:
    if (error != NULL) {
        error_propagate(errp, error);
        if (cpu) {
            object_unref(OBJECT(cpu));
            cpu = NULL;
        }
    }
    g_strfreev(model_pieces);
    return cpu;
}
/* Create AND realize an X86CPU from a "-cpu"-style model string.
 *
 * Convenience wrapper around cpu_x86_create(); errors are reported to
 * stderr (not propagated) and NULL is returned on failure.
 */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    Error *error = NULL;
    X86CPU *cpu;

    cpu = cpu_x86_create(cpu_model, &error);
    if (error) {
        goto out;
    }

    object_property_set_bool(OBJECT(cpu), true, "realized", &error);

out:
    if (error) {
        error_report_err(error);
        if (cpu != NULL) {
            object_unref(OBJECT(cpu));
            cpu = NULL;
        }
    }
    return cpu;
}
/* class_init for the per-model CPU subclasses registered below: stash the
 * X86CPUDefinition (passed as class_data) in the class so instance init
 * can load it.
 */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
}
/* Register a QOM type (subclass of TYPE_X86_CPU) for one built-in CPU
 * model definition.  The definition itself is handed to class_init via
 * class_data; the generated type name is derived from def->name.
 */
static void x86_register_cpudef_type(X86CPUDefinition *def)
{
    char *typename = x86_cpu_type_name(def->name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = def,
    };

    type_register(&ti);
    g_free(typename);
}
#if !defined(CONFIG_USER_ONLY)

/* Hide the APIC bit from CPUID[1].EDX (used by boards without an APIC). */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */
/* Compute the guest-visible CPUID result for leaf @index, subleaf @count,
 * storing it into *eax..*edx.
 *
 * Out-of-range leaves are first clamped: basic leaves to cpuid_level,
 * extended (0x8000_0000+) leaves to cpuid_xlevel, and Centaur
 * (0xC000_0000+) leaves to cpuid_xlevel2 when that is enabled.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and maximum basic leaf. */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version/feature information.  OSXSAVE mirrors CR4.OSXSAVE. */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        *ecx = 0;
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    /* L2 is shared by the threads of a core (EAX[25:14]). */
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE, like OSXSAVE mirrors CR4.OSXSAVE. */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;   /* ECX[7:0] echoes the input sub-leaf */
        *edx = cpu->apic_id;   /* x2APIC ID of the current logical CPU */

        switch (count) {
        case 0:
            /* SMT level: bits to shift right to get the core ID. */
            *eax = apicid_core_offset(smp_cores, smp_threads);
            *ebx = smp_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            /* Core level: bits to shift right to get the package ID. */
            *eax = apicid_pkg_offset(smp_cores, smp_threads);
            *ebx = smp_cores * smp_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        KVMState *s = cs->kvm_state;
        uint64_t ena_mask;
        int i;

        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }
        if (kvm_enabled()) {
            /* 64-bit mask of XSAVE components KVM can virtualize. */
            ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
            ena_mask <<= 32;
            ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
        } else {
            ena_mask = -1;
        }

        if (count == 0) {
            *ecx = 0x240;  /* legacy FP+SSE area size */
            for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
                const ExtSaveArea *esa = &x86_ext_save_areas[i];
                if ((env->features[esa->feature] & esa->bits) == esa->bits
                    && ((ena_mask >> i) & 1) != 0) {
                    if (i < 32) {
                        *eax |= 1u << i;
                    } else {
                        *edx |= 1u << (i - 32);
                    }
                    /* ECX: size required by all enabled components. */
                    *ecx = MAX(*ecx, esa->offset + esa->size);
                }
            }
            *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            const ExtSaveArea *esa = &x86_ext_save_areas[count];
            if ((env->features[esa->feature] & esa->bits) == esa->bits
                && ((ena_mask >> count) & 1) != 0) {
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        /* Extended vendor string and maximum extended leaf. */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf. */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        *edx = ((L3_SIZE_KB/512) << 18) | \
               (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
               (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        break;
    case 0x80000007:
        /* Advanced Power Management (invariant TSC, etc.) */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
                *eax = 0x00000024; /* 36 bits physical */
            } else {
                *eax = 0x00000020; /* 32 bits physical */
            }
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        /* SVM information leaf (AMD only). */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        /* Centaur/VIA maximum leaf. */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* CPUClass::reset()
 *
 * Puts the CPU into the architectural RESET state: real mode at
 * CS:IP = F000:FFF0, FPU/SSE init values, cleared MTRRs/debug registers,
 * and (system emulation only) BSP designation and halt state for APs.
 * Everything before cpuid_level in CPUX86State is zeroed first, so the
 * CPUID configuration survives the reset.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    memset(env, 0, offsetof(CPUX86State, cpuid_level));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode.  */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((env->features[esa->feature] & esa->bits) == esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
#ifndef CONFIG_USER_ONLY
/* True if this CPU's local APIC base MSR has the BSP flag set. */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
/* TODO: remove me, when reset over QOM tree is implemented */
/* Machine-reset hook: forward the system reset to this CPU. */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif
/* Initialize machine-check state: if the CPU model is family >= 6 and
 * advertises both MCE and MCA, enable the default MCG capabilities and
 * mark all default banks fully writable.
 */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
#ifndef CONFIG_USER_ONLY
/* Create (but do not realize) the local APIC device for @cpu, choosing the
 * in-kernel KVM APIC or the Xen APIC when those accelerators provide one.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = DEVICE(object_new(apic_type));

    object_property_add_child(OBJECT(cpu), "apic",
                              OBJECT(cpu->apic_state), NULL);
    qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
/* Realize the APIC created by x86_cpu_apic_create() (no-op when the CPU
 * has none) and map the APIC MMIO region.  The mapping is done only once
 * globally because all CPUs share the same APIC base address.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
/* machine-init-done notifier: if the machine exposes /machine/smram, alias
 * it (initially disabled) on top of this CPU's address space so SMM code
 * can toggle SMRAM visibility per CPU.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
#else
/* user-mode emulation: no APIC to realize */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif
/* Vendor checks: all three CPUID[0] vendor words must match. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for X86CPU.
 *
 * Validates the configuration (apic-id set, features supported by the
 * accelerator), finalizes feature words (AMD CPUID aliases, cpuid_level
 * bump for leaf 7), then creates the execution state, APIC, MCE state and
 * per-CPU address spaces, starts the vCPU, and resets it.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (cpu->apic_id < 0) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /* Leaf 7 features require cpuid_level >= 7 to be visible. */
    if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
        env->cpuid_level = 7;
    }

    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }


    cpu_exec_init(cs, &error_abort);

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram.  */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
/* Opaque state for a bool QOM property that exposes one or more bits of a
 * feature word.  ptr points into env->features[]; mask may cover several
 * bits when the same name is registered more than once.
 */
typedef struct BitProperty {
    uint32_t *ptr;
    uint32_t mask;
} BitProperty;
/* Property getter: true only when ALL bits in the mask are set. */
static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    BitProperty *fp = opaque;
    bool value = (*fp->ptr & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}
/* Property setter: set or clear every bit in the mask.  Rejected after the
 * device is realized, since feature words are frozen at realize time.
 */
static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        *fp->ptr |= fp->mask;
    } else {
        *fp->ptr &= ~fp->mask;
    }
}
/* Property release hook: free the BitProperty allocated at registration. */
static void x86_cpu_release_bit_prop(Object *obj, const char *name,
                                     void *opaque)
{
    BitProperty *prop = opaque;
    g_free(prop);
}
/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      uint32_t *field,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        /* Already registered: just widen the existing property's mask. */
        fp = op->opaque;
        assert(fp->ptr == field);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->ptr = field;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}
/* Register bool properties for feature word @w, bit @bitnr, using the names
 * from feature_word_info[].  A feat_names entry may contain several names
 * separated by '|': the first becomes the property, the rest become aliases.
 */
static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    Object *obj = OBJECT(cpu);
    int i;
    char **names;
    FeatureWordInfo *fi = &feature_word_info[w];

    if (!fi->feat_names) {
        return;
    }
    if (!fi->feat_names[bitnr]) {
        return;
    }

    names = g_strsplit(fi->feat_names[bitnr], "|", 0);

    feat2prop(names[0]);
    x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);

    for (i = 1; names[i]; i++) {
        feat2prop(names[i]);
        object_property_add_alias(obj, names[i], obj, names[0],
                                  &error_abort);
    }

    g_strfreev(names);
}
/* QOM instance_init for X86CPU: register all static and per-feature-bit
 * properties, set instance defaults, and load the class's CPU model
 * definition into the new object.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "apic-id", "int",
                        x86_cpuid_get_apic_id,
                        x86_cpuid_set_apic_id, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

#ifndef CONFIG_USER_ONLY
    /* Any code creating new X86CPU objects have to set apic-id explicitly */
    cpu->apic_id = -1;
#endif

    /* One bool property per named CPUID feature bit. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3222 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3224 X86CPU *cpu = X86_CPU(cs);
3226 return cpu->apic_id;
3229 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3231 X86CPU *cpu = X86_CPU(cs);
3233 return cpu->env.cr[0] & CR0_PG_MASK;
3236 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3238 X86CPU *cpu = X86_CPU(cs);
3240 cpu->env.eip = value;
3243 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3245 X86CPU *cpu = X86_CPU(cs);
3247 cpu->env.eip = tb->pc - tb->cs_base;
3250 static bool x86_cpu_has_work(CPUState *cs)
3252 X86CPU *cpu = X86_CPU(cs);
3253 CPUX86State *env = &cpu->env;
3255 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3256 CPU_INTERRUPT_POLL)) &&
3257 (env->eflags & IF_MASK)) ||
3258 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3259 CPU_INTERRUPT_INIT |
3260 CPU_INTERRUPT_SIPI |
3261 CPU_INTERRUPT_MCE)) ||
3262 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3263 !(env->hflags & HF_SMM_MASK));
/* qdev properties shared by every X86CPU model */
static Property x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments exposed to Windows guests (hv-*) */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* CPUID checking/enforcement against host capabilities */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    /* Overrides for the maximum basic/extended CPUID levels */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    /* Expose CPUID leaf 0xB (Extended Topology Enumeration); on by default */
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_END_OF_LIST()
};
/*
 * QOM class_init for TYPE_X86_CPU: installs the x86 implementations of
 * the DeviceClass and CPUClass hooks, chaining up realize/reset so the
 * x86 versions can invoke their parents.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Save the parent hooks before overriding them (chain-up pattern) */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System emulation only: memory inspection, crash notes, migration */
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    /* NOTE(review): register layout defined by x86_cpu_gdb_read_register */
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    /*
     * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
     * object in cpus -> dangling pointer after final object_unref().
     */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
/* Abstract base QOM type; concrete CPU models are registered as subtypes */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,  /* never instantiated directly */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3350 static void x86_cpu_register_types(void)
3352 int i;
3354 type_register_static(&x86_cpu_type_info);
3355 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3356 x86_register_cpudef_type(&builtin_x86_defs[i]);
3358 #ifdef CONFIG_KVM
3359 type_register_static(&host_x86_cpu_type_info);
3360 #endif
3363 type_init(x86_cpu_register_types)