qemu/ar7.git: target-i386/cpu.c (blob 3bd3cfc3ad16575c21488aaf8f4f52a2237ecd0b)
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
53 /* Cache topology CPUID constants: */
55 /* CPUID Leaf 2 Descriptors */
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 /* CPUID Leaf 4 constants: */
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
69 #define CPUID_4_LEVEL(l) ((l) << 5)
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
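/* Illustrative example (cpu_x86_cpuid() itself is not shown in this excerpt):
 * a leaf-4 descriptor is expected to be built by OR-ing these fields, e.g.
 *     eax = CPUID_4_TYPE_DCACHE | CPUID_4_LEVEL(1) | CPUID_4_SELF_INIT_LEVEL;
 * which encodes "level-1 data cache, self-initializing" in CPUID.04H:EAX. */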
79 #define ASSOC_FULL 0xFF
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
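/* Worked examples of the encoding above: AMD_ENC_ASSOC(1) == 1 (direct
 * mapped), AMD_ENC_ASSOC(8) == 0x6, AMD_ENC_ASSOC(ASSOC_FULL) == 0xF, and
 * any associativity not listed (e.g. 24) encodes as 0 (invalid). */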
96 /* Definitions of the hardcoded cache entries we expose: */
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
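/* Sanity check of the leaf-2/leaf-4 numbers above:
 * 64 B/line * 8 ways * 64 sets * 1 partition = 32768 B = 32 KiB, matching
 * CPUID_2_L1D_32KB_8WAY_64B.  The AMD leaf-0x80000005 values (64 KiB,
 * 2-way) differ, which is exactly what the FIXME above points out. */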
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
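/* Likewise for L2: 64 B/line * 16 ways * 4096 sets * 1 partition = 4 MiB,
 * while the leaf-2 descriptor above reports a 2 MB cache; that is the
 * inconsistency the FIXME on L2_DESCRIPTOR refers to. */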
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
140 /* TLB definitions: */
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
173 dst[CPUID_VENDOR_SZ] = '\0';
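/* Example: CPUID.0 on an Intel CPU returns EBX=0x756e6547 ("Genu"),
 * EDX=0x49656e69 ("ineI"), ECX=0x6c65746e ("ntel"); calling this helper as
 * x86_cpu_vendor_words2str(dst, ebx, edx, ecx) therefore yields the usual
 * "GenuineIntel" string (see host_x86_cpu_class_init() below). */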
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
179 */
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
200 /* Feature names that are already defined on feature_name[] but are set on
201 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
202 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203 * if and only if CPU vendor is AMD.
204 */
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
263 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
266 static const char *cpuid_7_0_ecx_feature_name[] = {
267 NULL, NULL, NULL, "pku",
268 "ospke", NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
277 static const char *cpuid_apm_edx_feature_name[] = {
278 NULL, NULL, NULL, NULL,
279 NULL, NULL, NULL, NULL,
280 "invtsc", NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
288 static const char *cpuid_xsave_feature_name[] = {
289 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
299 static const char *cpuid_6_feature_name[] = {
300 NULL, NULL, "arat", NULL,
301 NULL, NULL, NULL, NULL,
302 NULL, NULL, NULL, NULL,
303 NULL, NULL, NULL, NULL,
304 NULL, NULL, NULL, NULL,
305 NULL, NULL, NULL, NULL,
306 NULL, NULL, NULL, NULL,
307 NULL, NULL, NULL, NULL,
310 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
311 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
312 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
313 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
314 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
315 CPUID_PSE36 | CPUID_FXSR)
316 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
317 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
318 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
319 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
320 CPUID_PAE | CPUID_SEP | CPUID_APIC)
322 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
323 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
324 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
325 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
326 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
327 /* partly implemented:
328 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
329 /* missing:
330 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
331 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
332 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
333 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
334 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
335 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
336 /* missing:
337 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
338 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
339 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
340 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
341 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
343 #ifdef TARGET_X86_64
344 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
345 #else
346 #define TCG_EXT2_X86_64_FEATURES 0
347 #endif
349 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
350 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
351 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
352 TCG_EXT2_X86_64_FEATURES)
353 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
354 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
355 #define TCG_EXT4_FEATURES 0
356 #define TCG_SVM_FEATURES 0
357 #define TCG_KVM_FEATURES 0
358 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
359 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
360 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
361 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
362 /* missing:
363 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
364 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
365 CPUID_7_0_EBX_RDSEED */
366 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
367 #define TCG_APM_FEATURES 0
368 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
369 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
370 /* missing:
371 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
373 typedef struct FeatureWordInfo {
374 const char **feat_names;
375 uint32_t cpuid_eax; /* Input EAX for CPUID */
376 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
377 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
378 int cpuid_reg; /* output register (R_* constant) */
379 uint32_t tcg_features; /* Feature flags supported by TCG */
380 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
381 } FeatureWordInfo;
383 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
384 [FEAT_1_EDX] = {
385 .feat_names = feature_name,
386 .cpuid_eax = 1, .cpuid_reg = R_EDX,
387 .tcg_features = TCG_FEATURES,
389 [FEAT_1_ECX] = {
390 .feat_names = ext_feature_name,
391 .cpuid_eax = 1, .cpuid_reg = R_ECX,
392 .tcg_features = TCG_EXT_FEATURES,
394 [FEAT_8000_0001_EDX] = {
395 .feat_names = ext2_feature_name,
396 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
397 .tcg_features = TCG_EXT2_FEATURES,
399 [FEAT_8000_0001_ECX] = {
400 .feat_names = ext3_feature_name,
401 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
402 .tcg_features = TCG_EXT3_FEATURES,
404 [FEAT_C000_0001_EDX] = {
405 .feat_names = ext4_feature_name,
406 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
407 .tcg_features = TCG_EXT4_FEATURES,
409 [FEAT_KVM] = {
410 .feat_names = kvm_feature_name,
411 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
412 .tcg_features = TCG_KVM_FEATURES,
414 [FEAT_SVM] = {
415 .feat_names = svm_feature_name,
416 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
417 .tcg_features = TCG_SVM_FEATURES,
419 [FEAT_7_0_EBX] = {
420 .feat_names = cpuid_7_0_ebx_feature_name,
421 .cpuid_eax = 7,
422 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
423 .cpuid_reg = R_EBX,
424 .tcg_features = TCG_7_0_EBX_FEATURES,
426 [FEAT_7_0_ECX] = {
427 .feat_names = cpuid_7_0_ecx_feature_name,
428 .cpuid_eax = 7,
429 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
430 .cpuid_reg = R_ECX,
431 .tcg_features = TCG_7_0_ECX_FEATURES,
433 [FEAT_8000_0007_EDX] = {
434 .feat_names = cpuid_apm_edx_feature_name,
435 .cpuid_eax = 0x80000007,
436 .cpuid_reg = R_EDX,
437 .tcg_features = TCG_APM_FEATURES,
438 .unmigratable_flags = CPUID_APM_INVTSC,
440 [FEAT_XSAVE] = {
441 .feat_names = cpuid_xsave_feature_name,
442 .cpuid_eax = 0xd,
443 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
444 .cpuid_reg = R_EAX,
445 .tcg_features = TCG_XSAVE_FEATURES,
447 [FEAT_6_EAX] = {
448 .feat_names = cpuid_6_feature_name,
449 .cpuid_eax = 6, .cpuid_reg = R_EAX,
450 .tcg_features = TCG_6_EAX_FEATURES,
454 typedef struct X86RegisterInfo32 {
455 /* Name of register */
456 const char *name;
457 /* QAPI enum value register */
458 X86CPURegister32 qapi_enum;
459 } X86RegisterInfo32;
461 #define REGISTER(reg) \
462 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
463 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
464 REGISTER(EAX),
465 REGISTER(ECX),
466 REGISTER(EDX),
467 REGISTER(EBX),
468 REGISTER(ESP),
469 REGISTER(EBP),
470 REGISTER(ESI),
471 REGISTER(EDI),
473 #undef REGISTER
475 const ExtSaveArea x86_ext_save_areas[] = {
476 [XSTATE_YMM_BIT] =
477 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
478 .offset = offsetof(X86XSaveArea, avx_state),
479 .size = sizeof(XSaveAVX) },
480 [XSTATE_BNDREGS_BIT] =
481 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
482 .offset = offsetof(X86XSaveArea, bndreg_state),
483 .size = sizeof(XSaveBNDREG) },
484 [XSTATE_BNDCSR_BIT] =
485 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
486 .offset = offsetof(X86XSaveArea, bndcsr_state),
487 .size = sizeof(XSaveBNDCSR) },
488 [XSTATE_OPMASK_BIT] =
489 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
490 .offset = offsetof(X86XSaveArea, opmask_state),
491 .size = sizeof(XSaveOpmask) },
492 [XSTATE_ZMM_Hi256_BIT] =
493 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
494 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
495 .size = sizeof(XSaveZMM_Hi256) },
496 [XSTATE_Hi16_ZMM_BIT] =
497 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
498 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
499 .size = sizeof(XSaveHi16_ZMM) },
500 [XSTATE_PKRU_BIT] =
501 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
502 .offset = offsetof(X86XSaveArea, pkru_state),
503 .size = sizeof(XSavePKRU) },
506 const char *get_register_name_32(unsigned int reg)
508 if (reg >= CPU_NB_REGS32) {
509 return NULL;
511 return x86_reg_info_32[reg].name;
512 }
514 /*
515 * Returns the set of feature flags that are supported and migratable by
516 * QEMU, for a given FeatureWord.
517 */
518 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
520 FeatureWordInfo *wi = &feature_word_info[w];
521 uint32_t r = 0;
522 int i;
524 for (i = 0; i < 32; i++) {
525 uint32_t f = 1U << i;
526 /* If the feature name is unknown, it is not supported by QEMU yet */
527 if (!wi->feat_names[i]) {
528 continue;
530 /* Skip features known to QEMU, but explicitly marked as unmigratable */
531 if (wi->unmigratable_flags & f) {
532 continue;
534 r |= f;
536 return r;
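/* For example, FEAT_8000_0007_EDX lists CPUID_APM_INVTSC ("invtsc") in its
 * unmigratable_flags above, so this helper reports every known bit of that
 * word except "invtsc" as migratable. */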
539 void host_cpuid(uint32_t function, uint32_t count,
540 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
542 uint32_t vec[4];
544 #ifdef __x86_64__
545 asm volatile("cpuid"
546 : "=a"(vec[0]), "=b"(vec[1]),
547 "=c"(vec[2]), "=d"(vec[3])
548 : "0"(function), "c"(count) : "cc");
549 #elif defined(__i386__)
550 asm volatile("pusha \n\t"
551 "cpuid \n\t"
552 "mov %%eax, 0(%2) \n\t"
553 "mov %%ebx, 4(%2) \n\t"
554 "mov %%ecx, 8(%2) \n\t"
555 "mov %%edx, 12(%2) \n\t"
556 "popa"
557 : : "a"(function), "c"(count), "S"(vec)
558 : "memory", "cc");
559 #else
560 abort();
561 #endif
563 if (eax)
564 *eax = vec[0];
565 if (ebx)
566 *ebx = vec[1];
567 if (ecx)
568 *ecx = vec[2];
569 if (edx)
570 *edx = vec[3];
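/* Usage example (see host_x86_cpu_class_init() below): the "host" CPU model
 * probes the real CPU with
 *     host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);   vendor string
 *     host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);   family/model/stepping
 * and decodes the results with x86_cpu_vendor_words2str() and the
 * family/model bit manipulation shown there. */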
573 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
575 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
576 * a substring. ex if !NULL points to the first char after a substring,
577 * otherwise the string is assumed to be sized by a terminating nul.
578 * Return lexical ordering of *s1:*s2.
579 */
580 static int sstrcmp(const char *s1, const char *e1,
581 const char *s2, const char *e2)
583 for (;;) {
584 if (!*s1 || !*s2 || *s1 != *s2)
585 return (*s1 - *s2);
586 ++s1, ++s2;
587 if (s1 == e1 && s2 == e2)
588 return (0);
589 else if (s1 == e1)
590 return (*s2);
591 else if (s2 == e2)
592 return (*s1);
596 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
597 * '|' delimited (possibly empty) strings in which case search for a match
598 * within the alternatives proceeds left to right. Return 0 for success,
599 * non-zero otherwise.
600 */
601 static int altcmp(const char *s, const char *e, const char *altstr)
603 const char *p, *q;
605 for (q = p = altstr; ; ) {
606 while (*p && *p != '|')
607 ++p;
608 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
609 return (0);
610 if (!*p)
611 return (1);
612 else
613 q = ++p;
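/* Example: with the featureset entry "sse4.1|sse4_1" (see ext_feature_name[]
 * above), altcmp("sse4_1", NULL, "sse4.1|sse4_1") returns 0 (match on the
 * second alternative), while altcmp("sse4", NULL, "sse4.1|sse4_1") returns
 * non-zero. */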
617 /* search featureset for flag *[s..e), if found set corresponding bit in
618 * *pval and return true, otherwise return false
619 */
620 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
621 const char **featureset)
623 uint32_t mask;
624 const char **ppc;
625 bool found = false;
627 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
628 if (*ppc && !altcmp(s, e, *ppc)) {
629 *pval |= mask;
630 found = true;
633 return found;
636 static void add_flagname_to_bitmaps(const char *flagname,
637 FeatureWordArray words,
638 Error **errp)
640 FeatureWord w;
641 for (w = 0; w < FEATURE_WORDS; w++) {
642 FeatureWordInfo *wi = &feature_word_info[w];
643 if (wi->feat_names &&
644 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
645 break;
648 if (w == FEATURE_WORDS) {
649 error_setg(errp, "CPU feature %s not found", flagname);
653 /* CPU class name definitions: */
655 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
656 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
658 /* Return type name for a given CPU model name
659 * Caller is responsible for freeing the returned string.
660 */
661 static char *x86_cpu_type_name(const char *model_name)
663 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
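/* Example: x86_cpu_type_name("qemu64") produces "qemu64-" TYPE_X86_CPU,
 * i.e. something like "qemu64-x86_64-cpu" on 64-bit targets (TYPE_X86_CPU
 * is target-dependent). */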
666 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
668 ObjectClass *oc;
669 char *typename;
671 if (cpu_model == NULL) {
672 return NULL;
675 typename = x86_cpu_type_name(cpu_model);
676 oc = object_class_by_name(typename);
677 g_free(typename);
678 return oc;
681 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
683 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
684 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
685 return g_strndup(class_name,
686 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
689 struct X86CPUDefinition {
690 const char *name;
691 uint32_t level;
692 uint32_t xlevel;
693 uint32_t xlevel2;
694 /* vendor is zero-terminated, 12 character ASCII string */
695 char vendor[CPUID_VENDOR_SZ + 1];
696 int family;
697 int model;
698 int stepping;
699 FeatureWordArray features;
700 char model_id[48];
703 static X86CPUDefinition builtin_x86_defs[] = {
705 .name = "qemu64",
706 .level = 0xd,
707 .vendor = CPUID_VENDOR_AMD,
708 .family = 6,
709 .model = 6,
710 .stepping = 3,
711 .features[FEAT_1_EDX] =
712 PPRO_FEATURES |
713 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
714 CPUID_PSE36,
715 .features[FEAT_1_ECX] =
716 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
717 .features[FEAT_8000_0001_EDX] =
718 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
719 .features[FEAT_8000_0001_ECX] =
720 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
721 .xlevel = 0x8000000A,
722 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
725 .name = "phenom",
726 .level = 5,
727 .vendor = CPUID_VENDOR_AMD,
728 .family = 16,
729 .model = 2,
730 .stepping = 3,
731 /* Missing: CPUID_HT */
732 .features[FEAT_1_EDX] =
733 PPRO_FEATURES |
734 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
735 CPUID_PSE36 | CPUID_VME,
736 .features[FEAT_1_ECX] =
737 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
738 CPUID_EXT_POPCNT,
739 .features[FEAT_8000_0001_EDX] =
740 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
741 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
742 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
743 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
744 CPUID_EXT3_CR8LEG,
745 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
746 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
747 .features[FEAT_8000_0001_ECX] =
748 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
749 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
750 /* Missing: CPUID_SVM_LBRV */
751 .features[FEAT_SVM] =
752 CPUID_SVM_NPT,
753 .xlevel = 0x8000001A,
754 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
757 .name = "core2duo",
758 .level = 10,
759 .vendor = CPUID_VENDOR_INTEL,
760 .family = 6,
761 .model = 15,
762 .stepping = 11,
763 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
764 .features[FEAT_1_EDX] =
765 PPRO_FEATURES |
766 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
767 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
768 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
769 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
770 .features[FEAT_1_ECX] =
771 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
772 CPUID_EXT_CX16,
773 .features[FEAT_8000_0001_EDX] =
774 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
775 .features[FEAT_8000_0001_ECX] =
776 CPUID_EXT3_LAHF_LM,
777 .xlevel = 0x80000008,
778 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
781 .name = "kvm64",
782 .level = 0xd,
783 .vendor = CPUID_VENDOR_INTEL,
784 .family = 15,
785 .model = 6,
786 .stepping = 1,
787 /* Missing: CPUID_HT */
788 .features[FEAT_1_EDX] =
789 PPRO_FEATURES | CPUID_VME |
790 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
791 CPUID_PSE36,
792 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
793 .features[FEAT_1_ECX] =
794 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
795 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
796 .features[FEAT_8000_0001_EDX] =
797 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
798 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
799 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
800 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
801 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
802 .features[FEAT_8000_0001_ECX] =
804 .xlevel = 0x80000008,
805 .model_id = "Common KVM processor"
808 .name = "qemu32",
809 .level = 4,
810 .vendor = CPUID_VENDOR_INTEL,
811 .family = 6,
812 .model = 6,
813 .stepping = 3,
814 .features[FEAT_1_EDX] =
815 PPRO_FEATURES,
816 .features[FEAT_1_ECX] =
817 CPUID_EXT_SSE3,
818 .xlevel = 0x80000004,
819 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
822 .name = "kvm32",
823 .level = 5,
824 .vendor = CPUID_VENDOR_INTEL,
825 .family = 15,
826 .model = 6,
827 .stepping = 1,
828 .features[FEAT_1_EDX] =
829 PPRO_FEATURES | CPUID_VME |
830 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
831 .features[FEAT_1_ECX] =
832 CPUID_EXT_SSE3,
833 .features[FEAT_8000_0001_ECX] =
835 .xlevel = 0x80000008,
836 .model_id = "Common 32-bit KVM processor"
839 .name = "coreduo",
840 .level = 10,
841 .vendor = CPUID_VENDOR_INTEL,
842 .family = 6,
843 .model = 14,
844 .stepping = 8,
845 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
846 .features[FEAT_1_EDX] =
847 PPRO_FEATURES | CPUID_VME |
848 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
849 CPUID_SS,
850 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
851 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
852 .features[FEAT_1_ECX] =
853 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
854 .features[FEAT_8000_0001_EDX] =
855 CPUID_EXT2_NX,
856 .xlevel = 0x80000008,
857 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
860 .name = "486",
861 .level = 1,
862 .vendor = CPUID_VENDOR_INTEL,
863 .family = 4,
864 .model = 8,
865 .stepping = 0,
866 .features[FEAT_1_EDX] =
867 I486_FEATURES,
868 .xlevel = 0,
871 .name = "pentium",
872 .level = 1,
873 .vendor = CPUID_VENDOR_INTEL,
874 .family = 5,
875 .model = 4,
876 .stepping = 3,
877 .features[FEAT_1_EDX] =
878 PENTIUM_FEATURES,
879 .xlevel = 0,
882 .name = "pentium2",
883 .level = 2,
884 .vendor = CPUID_VENDOR_INTEL,
885 .family = 6,
886 .model = 5,
887 .stepping = 2,
888 .features[FEAT_1_EDX] =
889 PENTIUM2_FEATURES,
890 .xlevel = 0,
893 .name = "pentium3",
894 .level = 3,
895 .vendor = CPUID_VENDOR_INTEL,
896 .family = 6,
897 .model = 7,
898 .stepping = 3,
899 .features[FEAT_1_EDX] =
900 PENTIUM3_FEATURES,
901 .xlevel = 0,
904 .name = "athlon",
905 .level = 2,
906 .vendor = CPUID_VENDOR_AMD,
907 .family = 6,
908 .model = 2,
909 .stepping = 3,
910 .features[FEAT_1_EDX] =
911 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
912 CPUID_MCA,
913 .features[FEAT_8000_0001_EDX] =
914 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
915 .xlevel = 0x80000008,
916 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
919 .name = "n270",
920 .level = 10,
921 .vendor = CPUID_VENDOR_INTEL,
922 .family = 6,
923 .model = 28,
924 .stepping = 2,
925 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
926 .features[FEAT_1_EDX] =
927 PPRO_FEATURES |
928 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
929 CPUID_ACPI | CPUID_SS,
930 /* Some CPUs have no CPUID_SEP */
931 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
932 * CPUID_EXT_XTPR */
933 .features[FEAT_1_ECX] =
934 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
935 CPUID_EXT_MOVBE,
936 .features[FEAT_8000_0001_EDX] =
937 CPUID_EXT2_NX,
938 .features[FEAT_8000_0001_ECX] =
939 CPUID_EXT3_LAHF_LM,
940 .xlevel = 0x80000008,
941 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
944 .name = "Conroe",
945 .level = 10,
946 .vendor = CPUID_VENDOR_INTEL,
947 .family = 6,
948 .model = 15,
949 .stepping = 3,
950 .features[FEAT_1_EDX] =
951 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
952 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
953 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
954 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
955 CPUID_DE | CPUID_FP87,
956 .features[FEAT_1_ECX] =
957 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
958 .features[FEAT_8000_0001_EDX] =
959 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
960 .features[FEAT_8000_0001_ECX] =
961 CPUID_EXT3_LAHF_LM,
962 .xlevel = 0x80000008,
963 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
966 .name = "Penryn",
967 .level = 10,
968 .vendor = CPUID_VENDOR_INTEL,
969 .family = 6,
970 .model = 23,
971 .stepping = 3,
972 .features[FEAT_1_EDX] =
973 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
974 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
975 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
976 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
977 CPUID_DE | CPUID_FP87,
978 .features[FEAT_1_ECX] =
979 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
980 CPUID_EXT_SSE3,
981 .features[FEAT_8000_0001_EDX] =
982 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
983 .features[FEAT_8000_0001_ECX] =
984 CPUID_EXT3_LAHF_LM,
985 .xlevel = 0x80000008,
986 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
989 .name = "Nehalem",
990 .level = 11,
991 .vendor = CPUID_VENDOR_INTEL,
992 .family = 6,
993 .model = 26,
994 .stepping = 3,
995 .features[FEAT_1_EDX] =
996 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
997 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
998 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
999 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1000 CPUID_DE | CPUID_FP87,
1001 .features[FEAT_1_ECX] =
1002 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1003 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1004 .features[FEAT_8000_0001_EDX] =
1005 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1006 .features[FEAT_8000_0001_ECX] =
1007 CPUID_EXT3_LAHF_LM,
1008 .xlevel = 0x80000008,
1009 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1012 .name = "Westmere",
1013 .level = 11,
1014 .vendor = CPUID_VENDOR_INTEL,
1015 .family = 6,
1016 .model = 44,
1017 .stepping = 1,
1018 .features[FEAT_1_EDX] =
1019 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1020 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1021 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1022 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1023 CPUID_DE | CPUID_FP87,
1024 .features[FEAT_1_ECX] =
1025 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1026 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1027 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1028 .features[FEAT_8000_0001_EDX] =
1029 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1030 .features[FEAT_8000_0001_ECX] =
1031 CPUID_EXT3_LAHF_LM,
1032 .features[FEAT_6_EAX] =
1033 CPUID_6_EAX_ARAT,
1034 .xlevel = 0x80000008,
1035 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1038 .name = "SandyBridge",
1039 .level = 0xd,
1040 .vendor = CPUID_VENDOR_INTEL,
1041 .family = 6,
1042 .model = 42,
1043 .stepping = 1,
1044 .features[FEAT_1_EDX] =
1045 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1046 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1047 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1048 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1049 CPUID_DE | CPUID_FP87,
1050 .features[FEAT_1_ECX] =
1051 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1052 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1053 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1054 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1055 CPUID_EXT_SSE3,
1056 .features[FEAT_8000_0001_EDX] =
1057 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1058 CPUID_EXT2_SYSCALL,
1059 .features[FEAT_8000_0001_ECX] =
1060 CPUID_EXT3_LAHF_LM,
1061 .features[FEAT_XSAVE] =
1062 CPUID_XSAVE_XSAVEOPT,
1063 .features[FEAT_6_EAX] =
1064 CPUID_6_EAX_ARAT,
1065 .xlevel = 0x80000008,
1066 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1069 .name = "IvyBridge",
1070 .level = 0xd,
1071 .vendor = CPUID_VENDOR_INTEL,
1072 .family = 6,
1073 .model = 58,
1074 .stepping = 9,
1075 .features[FEAT_1_EDX] =
1076 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1077 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1078 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1079 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1080 CPUID_DE | CPUID_FP87,
1081 .features[FEAT_1_ECX] =
1082 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1083 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1084 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1085 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1086 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1087 .features[FEAT_7_0_EBX] =
1088 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1089 CPUID_7_0_EBX_ERMS,
1090 .features[FEAT_8000_0001_EDX] =
1091 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1092 CPUID_EXT2_SYSCALL,
1093 .features[FEAT_8000_0001_ECX] =
1094 CPUID_EXT3_LAHF_LM,
1095 .features[FEAT_XSAVE] =
1096 CPUID_XSAVE_XSAVEOPT,
1097 .features[FEAT_6_EAX] =
1098 CPUID_6_EAX_ARAT,
1099 .xlevel = 0x80000008,
1100 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1103 .name = "Haswell-noTSX",
1104 .level = 0xd,
1105 .vendor = CPUID_VENDOR_INTEL,
1106 .family = 6,
1107 .model = 60,
1108 .stepping = 1,
1109 .features[FEAT_1_EDX] =
1110 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1111 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1112 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1113 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1114 CPUID_DE | CPUID_FP87,
1115 .features[FEAT_1_ECX] =
1116 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1117 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1118 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1119 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1120 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1121 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1122 .features[FEAT_8000_0001_EDX] =
1123 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1124 CPUID_EXT2_SYSCALL,
1125 .features[FEAT_8000_0001_ECX] =
1126 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1127 .features[FEAT_7_0_EBX] =
1128 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1129 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1130 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1131 .features[FEAT_XSAVE] =
1132 CPUID_XSAVE_XSAVEOPT,
1133 .features[FEAT_6_EAX] =
1134 CPUID_6_EAX_ARAT,
1135 .xlevel = 0x80000008,
1136 .model_id = "Intel Core Processor (Haswell, no TSX)",
1137 }, {
1138 .name = "Haswell",
1139 .level = 0xd,
1140 .vendor = CPUID_VENDOR_INTEL,
1141 .family = 6,
1142 .model = 60,
1143 .stepping = 1,
1144 .features[FEAT_1_EDX] =
1145 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1146 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1147 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1148 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1149 CPUID_DE | CPUID_FP87,
1150 .features[FEAT_1_ECX] =
1151 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1152 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1153 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1154 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1155 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1156 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1157 .features[FEAT_8000_0001_EDX] =
1158 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1159 CPUID_EXT2_SYSCALL,
1160 .features[FEAT_8000_0001_ECX] =
1161 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1162 .features[FEAT_7_0_EBX] =
1163 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1164 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1165 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1166 CPUID_7_0_EBX_RTM,
1167 .features[FEAT_XSAVE] =
1168 CPUID_XSAVE_XSAVEOPT,
1169 .features[FEAT_6_EAX] =
1170 CPUID_6_EAX_ARAT,
1171 .xlevel = 0x80000008,
1172 .model_id = "Intel Core Processor (Haswell)",
1175 .name = "Broadwell-noTSX",
1176 .level = 0xd,
1177 .vendor = CPUID_VENDOR_INTEL,
1178 .family = 6,
1179 .model = 61,
1180 .stepping = 2,
1181 .features[FEAT_1_EDX] =
1182 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1183 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1184 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1185 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1186 CPUID_DE | CPUID_FP87,
1187 .features[FEAT_1_ECX] =
1188 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1189 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1190 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1191 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1192 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1193 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1194 .features[FEAT_8000_0001_EDX] =
1195 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1196 CPUID_EXT2_SYSCALL,
1197 .features[FEAT_8000_0001_ECX] =
1198 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1199 .features[FEAT_7_0_EBX] =
1200 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1201 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1202 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1203 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1204 CPUID_7_0_EBX_SMAP,
1205 .features[FEAT_XSAVE] =
1206 CPUID_XSAVE_XSAVEOPT,
1207 .features[FEAT_6_EAX] =
1208 CPUID_6_EAX_ARAT,
1209 .xlevel = 0x80000008,
1210 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1213 .name = "Broadwell",
1214 .level = 0xd,
1215 .vendor = CPUID_VENDOR_INTEL,
1216 .family = 6,
1217 .model = 61,
1218 .stepping = 2,
1219 .features[FEAT_1_EDX] =
1220 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1221 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1222 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1223 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1224 CPUID_DE | CPUID_FP87,
1225 .features[FEAT_1_ECX] =
1226 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1227 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1228 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1229 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1230 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1231 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1232 .features[FEAT_8000_0001_EDX] =
1233 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1234 CPUID_EXT2_SYSCALL,
1235 .features[FEAT_8000_0001_ECX] =
1236 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1237 .features[FEAT_7_0_EBX] =
1238 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1239 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1240 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1241 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1242 CPUID_7_0_EBX_SMAP,
1243 .features[FEAT_XSAVE] =
1244 CPUID_XSAVE_XSAVEOPT,
1245 .features[FEAT_6_EAX] =
1246 CPUID_6_EAX_ARAT,
1247 .xlevel = 0x80000008,
1248 .model_id = "Intel Core Processor (Broadwell)",
1251 .name = "Skylake-Client",
1252 .level = 0xd,
1253 .vendor = CPUID_VENDOR_INTEL,
1254 .family = 6,
1255 .model = 94,
1256 .stepping = 3,
1257 .features[FEAT_1_EDX] =
1258 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1259 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1260 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1261 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1262 CPUID_DE | CPUID_FP87,
1263 .features[FEAT_1_ECX] =
1264 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1265 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1266 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1267 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1268 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1269 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1270 .features[FEAT_8000_0001_EDX] =
1271 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1272 CPUID_EXT2_SYSCALL,
1273 .features[FEAT_8000_0001_ECX] =
1274 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1275 .features[FEAT_7_0_EBX] =
1276 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1277 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1278 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1279 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1280 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1281 /* Missing: XSAVES (not supported by some Linux versions,
1282 * including v4.1 to v4.6).
1283 * KVM doesn't yet expose any XSAVES state save component,
1284 * and the only one defined in Skylake (processor tracing)
1285 * probably will block migration anyway.
1286 */
1287 .features[FEAT_XSAVE] =
1288 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1289 CPUID_XSAVE_XGETBV1,
1290 .features[FEAT_6_EAX] =
1291 CPUID_6_EAX_ARAT,
1292 .xlevel = 0x80000008,
1293 .model_id = "Intel Core Processor (Skylake)",
1296 .name = "Opteron_G1",
1297 .level = 5,
1298 .vendor = CPUID_VENDOR_AMD,
1299 .family = 15,
1300 .model = 6,
1301 .stepping = 1,
1302 .features[FEAT_1_EDX] =
1303 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1304 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1305 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1306 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1307 CPUID_DE | CPUID_FP87,
1308 .features[FEAT_1_ECX] =
1309 CPUID_EXT_SSE3,
1310 .features[FEAT_8000_0001_EDX] =
1311 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1312 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1313 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1314 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1315 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1316 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1317 .xlevel = 0x80000008,
1318 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1321 .name = "Opteron_G2",
1322 .level = 5,
1323 .vendor = CPUID_VENDOR_AMD,
1324 .family = 15,
1325 .model = 6,
1326 .stepping = 1,
1327 .features[FEAT_1_EDX] =
1328 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1329 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1330 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1331 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1332 CPUID_DE | CPUID_FP87,
1333 .features[FEAT_1_ECX] =
1334 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1335 /* Missing: CPUID_EXT2_RDTSCP */
1336 .features[FEAT_8000_0001_EDX] =
1337 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1338 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1339 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1340 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1341 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1342 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1343 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1344 .features[FEAT_8000_0001_ECX] =
1345 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1346 .xlevel = 0x80000008,
1347 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1350 .name = "Opteron_G3",
1351 .level = 5,
1352 .vendor = CPUID_VENDOR_AMD,
1353 .family = 15,
1354 .model = 6,
1355 .stepping = 1,
1356 .features[FEAT_1_EDX] =
1357 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1358 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1359 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1360 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1361 CPUID_DE | CPUID_FP87,
1362 .features[FEAT_1_ECX] =
1363 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1364 CPUID_EXT_SSE3,
1365 /* Missing: CPUID_EXT2_RDTSCP */
1366 .features[FEAT_8000_0001_EDX] =
1367 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1368 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1369 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1370 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1371 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1372 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1373 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1374 .features[FEAT_8000_0001_ECX] =
1375 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1376 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1377 .xlevel = 0x80000008,
1378 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1381 .name = "Opteron_G4",
1382 .level = 0xd,
1383 .vendor = CPUID_VENDOR_AMD,
1384 .family = 21,
1385 .model = 1,
1386 .stepping = 2,
1387 .features[FEAT_1_EDX] =
1388 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1389 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1390 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1391 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1392 CPUID_DE | CPUID_FP87,
1393 .features[FEAT_1_ECX] =
1394 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1395 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1396 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1397 CPUID_EXT_SSE3,
1398 /* Missing: CPUID_EXT2_RDTSCP */
1399 .features[FEAT_8000_0001_EDX] =
1400 CPUID_EXT2_LM |
1401 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1402 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1403 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1404 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1405 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1406 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1407 .features[FEAT_8000_0001_ECX] =
1408 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1409 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1410 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1411 CPUID_EXT3_LAHF_LM,
1412 /* no xsaveopt! */
1413 .xlevel = 0x8000001A,
1414 .model_id = "AMD Opteron 62xx class CPU",
1417 .name = "Opteron_G5",
1418 .level = 0xd,
1419 .vendor = CPUID_VENDOR_AMD,
1420 .family = 21,
1421 .model = 2,
1422 .stepping = 0,
1423 .features[FEAT_1_EDX] =
1424 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1425 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1426 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1427 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1428 CPUID_DE | CPUID_FP87,
1429 .features[FEAT_1_ECX] =
1430 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1431 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1432 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1433 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1434 /* Missing: CPUID_EXT2_RDTSCP */
1435 .features[FEAT_8000_0001_EDX] =
1436 CPUID_EXT2_LM |
1437 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1438 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1439 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1440 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1441 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1442 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1443 .features[FEAT_8000_0001_ECX] =
1444 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1445 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1446 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1447 CPUID_EXT3_LAHF_LM,
1448 /* no xsaveopt! */
1449 .xlevel = 0x8000001A,
1450 .model_id = "AMD Opteron 63xx class CPU",
1454 typedef struct PropValue {
1455 const char *prop, *value;
1456 } PropValue;
1458 /* KVM-specific features that are automatically added/removed
1459 * from all CPU models when KVM is enabled.
1460 */
1461 static PropValue kvm_default_props[] = {
1462 { "kvmclock", "on" },
1463 { "kvm-nopiodelay", "on" },
1464 { "kvm-asyncpf", "on" },
1465 { "kvm-steal-time", "on" },
1466 { "kvm-pv-eoi", "on" },
1467 { "kvmclock-stable-bit", "on" },
1468 { "x2apic", "on" },
1469 { "acpi", "off" },
1470 { "monitor", "off" },
1471 { "svm", "off" },
1472 { NULL, NULL },
1475 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1477 PropValue *pv;
1478 for (pv = kvm_default_props; pv->prop; pv++) {
1479 if (!strcmp(pv->prop, prop)) {
1480 pv->value = value;
1481 break;
1485 /* It is valid to call this function only for properties that
1486 * are already present in the kvm_default_props table.
1487 */
1488 assert(pv->prop);
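/* Hypothetical usage example: a machine type that cannot provide a stable
 * kvmclock could call
 *     x86_cpu_change_kvm_default("kvmclock-stable-bit", "off");
 * during its init to adjust the defaults above before CPUs are created. */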
1491 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1492 bool migratable_only);
1494 #ifdef CONFIG_KVM
1496 static int cpu_x86_fill_model_id(char *str)
1498 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1499 int i;
1501 for (i = 0; i < 3; i++) {
1502 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1503 memcpy(str + i * 16 + 0, &eax, 4);
1504 memcpy(str + i * 16 + 4, &ebx, 4);
1505 memcpy(str + i * 16 + 8, &ecx, 4);
1506 memcpy(str + i * 16 + 12, &edx, 4);
1508 return 0;
1511 static X86CPUDefinition host_cpudef;
1513 static Property host_x86_cpu_properties[] = {
1514 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1515 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1516 DEFINE_PROP_END_OF_LIST()
1519 /* class_init for the "host" CPU model
1521 * This function may be called before KVM is initialized.
1522 */
1523 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1525 DeviceClass *dc = DEVICE_CLASS(oc);
1526 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1527 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1529 xcc->kvm_required = true;
1531 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1532 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1534 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1535 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1536 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1537 host_cpudef.stepping = eax & 0x0F;
1539 cpu_x86_fill_model_id(host_cpudef.model_id);
1541 xcc->cpu_def = &host_cpudef;
1543 /* level, xlevel, xlevel2, and the feature words are initialized on
1544 * instance_init, because they require KVM to be initialized.
1545 */
1547 dc->props = host_x86_cpu_properties;
1548 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1549 dc->cannot_destroy_with_object_finalize_yet = true;
1552 static void host_x86_cpu_initfn(Object *obj)
1554 X86CPU *cpu = X86_CPU(obj);
1555 CPUX86State *env = &cpu->env;
1556 KVMState *s = kvm_state;
1558 /* We can't fill the features array here because we don't know yet if
1559 * "migratable" is true or false.
1561 cpu->host_features = true;
1563 /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
1564 if (kvm_enabled()) {
1565 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1566 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1567 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1570 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1573 static const TypeInfo host_x86_cpu_type_info = {
1574 .name = X86_CPU_TYPE_NAME("host"),
1575 .parent = TYPE_X86_CPU,
1576 .instance_init = host_x86_cpu_initfn,
1577 .class_init = host_x86_cpu_class_init,
1580 #endif
1582 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1584 FeatureWordInfo *f = &feature_word_info[w];
1585 int i;
1587 for (i = 0; i < 32; ++i) {
1588 if ((1UL << i) & mask) {
1589 const char *reg = get_register_name_32(f->cpuid_reg);
1590 assert(reg);
1591 fprintf(stderr, "warning: %s doesn't support requested feature: "
1592 "CPUID.%02XH:%s%s%s [bit %d]\n",
1593 kvm_enabled() ? "host" : "TCG",
1594 f->cpuid_eax, reg,
1595 f->feat_names[i] ? "." : "",
1596 f->feat_names[i] ? f->feat_names[i] : "", i);
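/* A resulting message looks like, for example:
 *   warning: host doesn't support requested feature: CPUID.01H:ECX.vmx [bit 5]
 */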
1601 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1602 const char *name, void *opaque,
1603 Error **errp)
1605 X86CPU *cpu = X86_CPU(obj);
1606 CPUX86State *env = &cpu->env;
1607 int64_t value;
1609 value = (env->cpuid_version >> 8) & 0xf;
1610 if (value == 0xf) {
1611 value += (env->cpuid_version >> 20) & 0xff;
1613 visit_type_int(v, name, &value, errp);
1616 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1617 const char *name, void *opaque,
1618 Error **errp)
1620 X86CPU *cpu = X86_CPU(obj);
1621 CPUX86State *env = &cpu->env;
1622 const int64_t min = 0;
1623 const int64_t max = 0xff + 0xf;
1624 Error *local_err = NULL;
1625 int64_t value;
1627 visit_type_int(v, name, &value, &local_err);
1628 if (local_err) {
1629 error_propagate(errp, local_err);
1630 return;
1632 if (value < min || value > max) {
1633 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1634 name ? name : "null", value, min, max);
1635 return;
1638 env->cpuid_version &= ~0xff00f00;
1639 if (value > 0x0f) {
1640 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1641 } else {
1642 env->cpuid_version |= value << 8;
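/* Worked example of the encoding above: family 21 (0x15, the Opteron_G4/G5
 * models in builtin_x86_defs) is > 0x0f, so it is stored as base family 0xf
 * in bits 11:8 plus extended family 21 - 15 = 6 in bits 27:20, i.e.
 * cpuid_version |= 0x00600f00. */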
1646 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1647 const char *name, void *opaque,
1648 Error **errp)
1650 X86CPU *cpu = X86_CPU(obj);
1651 CPUX86State *env = &cpu->env;
1652 int64_t value;
1654 value = (env->cpuid_version >> 4) & 0xf;
1655 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1656 visit_type_int(v, name, &value, errp);
1659 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1660 const char *name, void *opaque,
1661 Error **errp)
1663 X86CPU *cpu = X86_CPU(obj);
1664 CPUX86State *env = &cpu->env;
1665 const int64_t min = 0;
1666 const int64_t max = 0xff;
1667 Error *local_err = NULL;
1668 int64_t value;
1670 visit_type_int(v, name, &value, &local_err);
1671 if (local_err) {
1672 error_propagate(errp, local_err);
1673 return;
1675 if (value < min || value > max) {
1676 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1677 name ? name : "null", value, min, max);
1678 return;
1681 env->cpuid_version &= ~0xf00f0;
1682 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
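/* Example: model 61 (0x3d, Broadwell) stores 0xd in bits 7:4 and 0x3 in
 * bits 19:16, i.e. cpuid_version |= 0x000300d0. */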
1685 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1686 const char *name, void *opaque,
1687 Error **errp)
1689 X86CPU *cpu = X86_CPU(obj);
1690 CPUX86State *env = &cpu->env;
1691 int64_t value;
1693 value = env->cpuid_version & 0xf;
1694 visit_type_int(v, name, &value, errp);
1697 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1698 const char *name, void *opaque,
1699 Error **errp)
1701 X86CPU *cpu = X86_CPU(obj);
1702 CPUX86State *env = &cpu->env;
1703 const int64_t min = 0;
1704 const int64_t max = 0xf;
1705 Error *local_err = NULL;
1706 int64_t value;
1708 visit_type_int(v, name, &value, &local_err);
1709 if (local_err) {
1710 error_propagate(errp, local_err);
1711 return;
1713 if (value < min || value > max) {
1714 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1715 name ? name : "null", value, min, max);
1716 return;
1719 env->cpuid_version &= ~0xf;
1720 env->cpuid_version |= value & 0xf;
1723 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1725 X86CPU *cpu = X86_CPU(obj);
1726 CPUX86State *env = &cpu->env;
1727 char *value;
1729 value = g_malloc(CPUID_VENDOR_SZ + 1);
1730 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1731 env->cpuid_vendor3);
1732 return value;
1735 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1736 Error **errp)
1738 X86CPU *cpu = X86_CPU(obj);
1739 CPUX86State *env = &cpu->env;
1740 int i;
1742 if (strlen(value) != CPUID_VENDOR_SZ) {
1743 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1744 return;
1747 env->cpuid_vendor1 = 0;
1748 env->cpuid_vendor2 = 0;
1749 env->cpuid_vendor3 = 0;
1750 for (i = 0; i < 4; i++) {
1751 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1752 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1753 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
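/*
 * The vendor string is twelve ASCII characters packed little-endian into
 * cpuid_vendor1/2/3 and returned as EBX/EDX/ECX of CPUID.00H (see
 * cpu_x86_cpuid() below).  For example, "GenuineIntel" becomes
 * EBX=0x756e6547 ("Genu"), EDX=0x49656e69 ("ineI"), ECX=0x6c65746e ("ntel").
 */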
1757 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1759 X86CPU *cpu = X86_CPU(obj);
1760 CPUX86State *env = &cpu->env;
1761 char *value;
1762 int i;
1764 value = g_malloc(48 + 1);
1765 for (i = 0; i < 48; i++) {
1766 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1768 value[48] = '\0';
1769 return value;
1772 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1773 Error **errp)
1775 X86CPU *cpu = X86_CPU(obj);
1776 CPUX86State *env = &cpu->env;
1777 int c, len, i;
1779 if (model_id == NULL) {
1780 model_id = "";
1782 len = strlen(model_id);
1783 memset(env->cpuid_model, 0, 48);
1784 for (i = 0; i < 48; i++) {
1785 if (i >= len) {
1786 c = '\0';
1787 } else {
1788 c = (uint8_t)model_id[i];
1790 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
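/*
 * "model-id" is the 48-byte processor brand string, stored as twelve
 * little-endian 32-bit words in cpuid_model[] and returned sixteen bytes at
 * a time by CPUID leaves 0x80000002..0x80000004 (handled below).  Strings
 * shorter than 48 characters are zero-padded.
 */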
1794 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1795 void *opaque, Error **errp)
1797 X86CPU *cpu = X86_CPU(obj);
1798 int64_t value;
1800 value = cpu->env.tsc_khz * 1000;
1801 visit_type_int(v, name, &value, errp);
1804 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1805 void *opaque, Error **errp)
1807 X86CPU *cpu = X86_CPU(obj);
1808 const int64_t min = 0;
1809 const int64_t max = INT64_MAX;
1810 Error *local_err = NULL;
1811 int64_t value;
1813 visit_type_int(v, name, &value, &local_err);
1814 if (local_err) {
1815 error_propagate(errp, local_err);
1816 return;
1818 if (value < min || value > max) {
1819 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1820 name ? name : "null", value, min, max);
1821 return;
1824 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
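/*
 * The "tsc-frequency" property is expressed in Hz but stored in kHz, so
 * sub-kHz precision is silently dropped here; e.g. setting 2500000000
 * (2.5 GHz) stores tsc_khz = 2500000 and reads back as 2500000000.
 */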
1827 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1828 void *opaque, Error **errp)
1830 X86CPU *cpu = X86_CPU(obj);
1831 int64_t value = cpu->apic_id;
1833 visit_type_int(v, name, &value, errp);
1836 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1837 void *opaque, Error **errp)
1839 X86CPU *cpu = X86_CPU(obj);
1840 DeviceState *dev = DEVICE(obj);
1841 const int64_t min = 0;
1842 const int64_t max = UINT32_MAX;
1843 Error *error = NULL;
1844 int64_t value;
1846 if (dev->realized) {
1847 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1848 "it was realized", name, object_get_typename(obj));
1849 return;
1852 visit_type_int(v, name, &value, &error);
1853 if (error) {
1854 error_propagate(errp, error);
1855 return;
1857 if (value < min || value > max) {
1858 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1859 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1860 object_get_typename(obj), name, value, min, max);
1861 return;
1864 if ((value != cpu->apic_id) && cpu_exists(value)) {
1865 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1866 return;
1868 cpu->apic_id = value;
1871 /* Generic getter for "feature-words" and "filtered-features" properties */
1872 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1873 const char *name, void *opaque,
1874 Error **errp)
1876 uint32_t *array = (uint32_t *)opaque;
1877 FeatureWord w;
1878 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1879 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1880 X86CPUFeatureWordInfoList *list = NULL;
1882 for (w = 0; w < FEATURE_WORDS; w++) {
1883 FeatureWordInfo *wi = &feature_word_info[w];
1884 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1885 qwi->cpuid_input_eax = wi->cpuid_eax;
1886 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1887 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1888 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1889 qwi->features = array[w];
1891 /* List will be in reverse order, but order shouldn't matter */
1892 list_entries[w].next = list;
1893 list_entries[w].value = &word_infos[w];
1894 list = &list_entries[w];
1897 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
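/*
 * This getter backs the "feature-words" and "filtered-features" QOM
 * properties registered in x86_cpu_initfn() below, so both lists can be
 * inspected at run time, e.g. (illustrative QMP command; the QOM path
 * depends on how the CPU object was created):
 *
 *     { "execute": "qom-get",
 *       "arguments": { "path": "/machine/unattached/device[0]",
 *                      "property": "filtered-features" } }
 */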
1900 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1901 void *opaque, Error **errp)
1903 X86CPU *cpu = X86_CPU(obj);
1904 int64_t value = cpu->hyperv_spinlock_attempts;
1906 visit_type_int(v, name, &value, errp);
1909 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1910 void *opaque, Error **errp)
1912 const int64_t min = 0xFFF;
1913 const int64_t max = UINT_MAX;
1914 X86CPU *cpu = X86_CPU(obj);
1915 Error *err = NULL;
1916 int64_t value;
1918 visit_type_int(v, name, &value, &err);
1919 if (err) {
1920 error_propagate(errp, err);
1921 return;
1924 if (value < min || value > max) {
1925 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1926 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1927 object_get_typename(obj), name ? name : "null",
1928 value, min, max);
1929 return;
1931 cpu->hyperv_spinlock_attempts = value;
1934 static PropertyInfo qdev_prop_spinlocks = {
1935 .name = "int",
1936 .get = x86_get_hv_spinlocks,
1937 .set = x86_set_hv_spinlocks,
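/*
 * "hv-spinlocks" is the Hyper-V spinlock retry count recommended to the
 * guest; values below 0xFFF are rejected by the setter above.  Typical
 * usage (illustrative): -cpu <model>,hv-spinlocks=0x1fff
 */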
1940 /* Convert all '_' in a feature string option name to '-', so that the feature
1941  * name conforms to the QOM property naming rule, which uses '-' instead of '_'. */
1943 static inline void feat2prop(char *s)
1945 while ((s = strchr(s, '_'))) {
1946 *s = '-';
1950 /* Compatibility hack to maintain the legacy +feat/-feat semantics,
1951  * where +feat/-feat overrides any feature set with "feat=on" or a bare
1952  * "feat", even if the latter is parsed after the +feat/-feat option
1953  * (i.e. "-x2apic,x2apic=on" still results in x2apic being disabled). */
1955 static FeatureWordArray plus_features = { 0 };
1956 static FeatureWordArray minus_features = { 0 };
1958 /* Parse "+feature,-feature,feature=foo" CPU feature string */
1960 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1961 Error **errp)
1963 X86CPU *cpu = X86_CPU(cs);
1964 char *featurestr; /* Single "key=value" string being parsed */
1965 Error *local_err = NULL;
1967 if (!features) {
1968 return;
1971 for (featurestr = strtok(features, ",");
1972 featurestr && !local_err;
1973 featurestr = strtok(NULL, ",")) {
1974 const char *name;
1975 const char *val = NULL;
1976 char *eq = NULL;
1978 /* Compatibility syntax: */
1979 if (featurestr[0] == '+') {
1980 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1981 continue;
1982 } else if (featurestr[0] == '-') {
1983 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1984 continue;
1987 eq = strchr(featurestr, '=');
1988 if (eq) {
1989 *eq++ = 0;
1990 val = eq;
1991 } else {
1992 val = "on";
1995 feat2prop(featurestr);
1996 name = featurestr;
1998 /* Special case: */
1999 if (!strcmp(name, "tsc-freq")) {
2000 int64_t tsc_freq;
2001 char *err;
2002 char num[32];
2004 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
2005 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
2006 if (tsc_freq < 0 || *err) {
2007 error_setg(errp, "bad numerical value %s", val);
2008 return;
2010 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2011 val = num;
2012 name = "tsc-frequency";
2015 object_property_parse(OBJECT(cpu), val, name, &local_err);
2018 if (local_err) {
2019 error_propagate(errp, local_err);
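/*
 * Examples of accepted feature strings (illustrative):
 *   "+avx"            legacy syntax, forces the bit on via plus_features
 *   "-x2apic"         legacy syntax, forces the bit off via minus_features
 *   "avx" / "avx=on"  parsed as a QOM property assignment (value defaults to "on")
 *   "tsc-freq=2.5G"   special-cased above: parsed with a 1000-based suffix and
 *                     forwarded as tsc-frequency=2500000000
 * Underscores in option names are rewritten to dashes by feat2prop().
 */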
2023 /* Print all cpuid feature names in featureset */
2025 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2027 int bit;
2028 bool first = true;
2030 for (bit = 0; bit < 32; bit++) {
2031 if (featureset[bit]) {
2032 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2033 first = false;
2038 /* generate CPU information. */
2039 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2041 X86CPUDefinition *def;
2042 char buf[256];
2043 int i;
2045 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2046 def = &builtin_x86_defs[i];
2047 snprintf(buf, sizeof(buf), "%s", def->name);
2048 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2050 #ifdef CONFIG_KVM
2051 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2052 "KVM processor with all supported host features "
2053 "(only available in KVM mode)");
2054 #endif
2056 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2057 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2058 FeatureWordInfo *fw = &feature_word_info[i];
2060 (*cpu_fprintf)(f, " ");
2061 listflags(f, cpu_fprintf, fw->feat_names);
2062 (*cpu_fprintf)(f, "\n");
2066 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2068 CpuDefinitionInfoList *cpu_list = NULL;
2069 X86CPUDefinition *def;
2070 int i;
2072 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2073 CpuDefinitionInfoList *entry;
2074 CpuDefinitionInfo *info;
2076 def = &builtin_x86_defs[i];
2077 info = g_malloc0(sizeof(*info));
2078 info->name = g_strdup(def->name);
2080 entry = g_malloc0(sizeof(*entry));
2081 entry->value = info;
2082 entry->next = cpu_list;
2083 cpu_list = entry;
2086 return cpu_list;
2089 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2090 bool migratable_only)
2092 FeatureWordInfo *wi = &feature_word_info[w];
2093 uint32_t r;
2095 if (kvm_enabled()) {
2096 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2097 wi->cpuid_ecx,
2098 wi->cpuid_reg);
2099 } else if (tcg_enabled()) {
2100 r = wi->tcg_features;
2101 } else {
2102 return ~0;
2104 if (migratable_only) {
2105 r &= x86_cpu_get_migratable_flags(w);
2107 return r;
2111 /* Filters CPU feature words based on host availability of each feature.
2113  * Returns: 0 if all flags are supported by the host, non-zero otherwise. */
2115 static int x86_cpu_filter_features(X86CPU *cpu)
2117 CPUX86State *env = &cpu->env;
2118 FeatureWord w;
2119 int rv = 0;
2121 for (w = 0; w < FEATURE_WORDS; w++) {
2122 uint32_t host_feat =
2123 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2124 uint32_t requested_features = env->features[w];
2125 env->features[w] &= host_feat;
2126 cpu->filtered_features[w] = requested_features & ~env->features[w];
2127 if (cpu->filtered_features[w]) {
2128 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2129 report_unavailable_features(w, cpu->filtered_features[w]);
2131 rv = 1;
2135 return rv;
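/*
 * Filtering interacts with the "check" and "enforce" properties defined in
 * x86_cpu_properties[]: with either one set, the unavailable bits are
 * reported via report_unavailable_features(), and with "enforce" the
 * realize step below additionally fails instead of silently dropping them.
 */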
2138 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2140 PropValue *pv;
2141 for (pv = props; pv->prop; pv++) {
2142 if (!pv->value) {
2143 continue;
2145 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2146 &error_abort);
2150 /* Load data from X86CPUDefinition */
2152 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2154 CPUX86State *env = &cpu->env;
2155 const char *vendor;
2156 char host_vendor[CPUID_VENDOR_SZ + 1];
2157 FeatureWord w;
2159 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2160 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2161 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2162 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2163 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2164 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2165 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2166 for (w = 0; w < FEATURE_WORDS; w++) {
2167 env->features[w] = def->features[w];
2170 /* Special cases not set in the X86CPUDefinition structs: */
2171 if (kvm_enabled()) {
2172 if (!kvm_irqchip_in_kernel()) {
2173 x86_cpu_change_kvm_default("x2apic", "off");
2176 x86_cpu_apply_props(cpu, kvm_default_props);
2179 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2181 /* sysenter isn't supported in compatibility mode on AMD,
2182 * syscall isn't supported in compatibility mode on Intel.
2183 * Normally we advertise the actual CPU vendor, but you can
2184 * override this using the 'vendor' property if you want to use
2185 * KVM's sysenter/syscall emulation in compatibility mode and
2186 * when doing cross-vendor migration. */
2188 vendor = def->vendor;
2189 if (kvm_enabled()) {
2190 uint32_t ebx = 0, ecx = 0, edx = 0;
2191 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2192 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2193 vendor = host_vendor;
2196 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2200 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2202 X86CPU *cpu = NULL;
2203 ObjectClass *oc;
2204 gchar **model_pieces;
2205 char *name, *features;
2206 Error *error = NULL;
2208 model_pieces = g_strsplit(cpu_model, ",", 2);
2209 if (!model_pieces[0]) {
2210 error_setg(&error, "Invalid/empty CPU model name");
2211 goto out;
2213 name = model_pieces[0];
2214 features = model_pieces[1];
2216 oc = x86_cpu_class_by_name(name);
2217 if (oc == NULL) {
2218 error_setg(&error, "Unable to find CPU definition: %s", name);
2219 goto out;
2222 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2224 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2225 if (error) {
2226 goto out;
2229 out:
2230 if (error != NULL) {
2231 error_propagate(errp, error);
2232 if (cpu) {
2233 object_unref(OBJECT(cpu));
2234 cpu = NULL;
2237 g_strfreev(model_pieces);
2238 return cpu;
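/*
 * The model string is split on the first ',' into a class name and an
 * optional feature list, e.g. (assuming a built-in model of that name):
 * "qemu64,+avx,tsc-freq=1G" selects the "qemu64" class and passes
 * "+avx,tsc-freq=1G" to x86_cpu_parse_featurestr().
 */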
2241 X86CPU *cpu_x86_init(const char *cpu_model)
2243 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
2246 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2248 X86CPUDefinition *cpudef = data;
2249 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2251 xcc->cpu_def = cpudef;
2254 static void x86_register_cpudef_type(X86CPUDefinition *def)
2256 char *typename = x86_cpu_type_name(def->name);
2257 TypeInfo ti = {
2258 .name = typename,
2259 .parent = TYPE_X86_CPU,
2260 .class_init = x86_cpu_cpudef_class_init,
2261 .class_data = def,
2264 type_register(&ti);
2265 g_free(typename);
2268 #if !defined(CONFIG_USER_ONLY)
2270 void cpu_clear_apic_feature(CPUX86State *env)
2272 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2275 #endif /* !CONFIG_USER_ONLY */
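/*
 * cpu_x86_cpuid() serves three leaf ranges: basic leaves 0..cpuid_level,
 * extended leaves 0x80000000..cpuid_xlevel and, when cpuid_xlevel2 is
 * non-zero, Centaur/VIA leaves 0xC0000000..cpuid_xlevel2.  Out-of-range
 * indices are clamped as described in the comments inside the function.
 */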
2277 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2278 uint32_t *eax, uint32_t *ebx,
2279 uint32_t *ecx, uint32_t *edx)
2281 X86CPU *cpu = x86_env_get_cpu(env);
2282 CPUState *cs = CPU(cpu);
2284 /* test if maximum index reached */
2285 if (index & 0x80000000) {
2286 if (index > env->cpuid_xlevel) {
2287 if (env->cpuid_xlevel2 > 0) {
2288 /* Handle the Centaur's CPUID instruction. */
2289 if (index > env->cpuid_xlevel2) {
2290 index = env->cpuid_xlevel2;
2291 } else if (index < 0xC0000000) {
2292 index = env->cpuid_xlevel;
2294 } else {
2295 /* Intel documentation states that invalid EAX input will
2296 * return the same information as EAX=cpuid_level
2297 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID). */
2299 index = env->cpuid_level;
2302 } else {
2303 if (index > env->cpuid_level)
2304 index = env->cpuid_level;
2307 switch(index) {
2308 case 0:
2309 *eax = env->cpuid_level;
2310 *ebx = env->cpuid_vendor1;
2311 *edx = env->cpuid_vendor2;
2312 *ecx = env->cpuid_vendor3;
2313 break;
2314 case 1:
2315 *eax = env->cpuid_version;
2316 *ebx = (cpu->apic_id << 24) |
2317 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2318 *ecx = env->features[FEAT_1_ECX];
2319 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2320 *ecx |= CPUID_EXT_OSXSAVE;
2322 *edx = env->features[FEAT_1_EDX];
2323 if (cs->nr_cores * cs->nr_threads > 1) {
2324 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2325 *edx |= CPUID_HT;
2327 break;
2328 case 2:
2329 /* cache info: needed for Pentium Pro compatibility */
2330 if (cpu->cache_info_passthrough) {
2331 host_cpuid(index, 0, eax, ebx, ecx, edx);
2332 break;
2334 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2335 *ebx = 0;
2336 *ecx = 0;
2337 *edx = (L1D_DESCRIPTOR << 16) | \
2338 (L1I_DESCRIPTOR << 8) | \
2339 (L2_DESCRIPTOR);
2340 break;
2341 case 4:
2342 /* cache info: needed for Core compatibility */
2343 if (cpu->cache_info_passthrough) {
2344 host_cpuid(index, count, eax, ebx, ecx, edx);
2345 *eax &= ~0xFC000000;
2346 } else {
2347 *eax = 0;
2348 switch (count) {
2349 case 0: /* L1 dcache info */
2350 *eax |= CPUID_4_TYPE_DCACHE | \
2351 CPUID_4_LEVEL(1) | \
2352 CPUID_4_SELF_INIT_LEVEL;
2353 *ebx = (L1D_LINE_SIZE - 1) | \
2354 ((L1D_PARTITIONS - 1) << 12) | \
2355 ((L1D_ASSOCIATIVITY - 1) << 22);
2356 *ecx = L1D_SETS - 1;
2357 *edx = CPUID_4_NO_INVD_SHARING;
2358 break;
2359 case 1: /* L1 icache info */
2360 *eax |= CPUID_4_TYPE_ICACHE | \
2361 CPUID_4_LEVEL(1) | \
2362 CPUID_4_SELF_INIT_LEVEL;
2363 *ebx = (L1I_LINE_SIZE - 1) | \
2364 ((L1I_PARTITIONS - 1) << 12) | \
2365 ((L1I_ASSOCIATIVITY - 1) << 22);
2366 *ecx = L1I_SETS - 1;
2367 *edx = CPUID_4_NO_INVD_SHARING;
2368 break;
2369 case 2: /* L2 cache info */
2370 *eax |= CPUID_4_TYPE_UNIFIED | \
2371 CPUID_4_LEVEL(2) | \
2372 CPUID_4_SELF_INIT_LEVEL;
2373 if (cs->nr_threads > 1) {
2374 *eax |= (cs->nr_threads - 1) << 14;
2376 *ebx = (L2_LINE_SIZE - 1) | \
2377 ((L2_PARTITIONS - 1) << 12) | \
2378 ((L2_ASSOCIATIVITY - 1) << 22);
2379 *ecx = L2_SETS - 1;
2380 *edx = CPUID_4_NO_INVD_SHARING;
2381 break;
2382 default: /* end of info */
2383 *eax = 0;
2384 *ebx = 0;
2385 *ecx = 0;
2386 *edx = 0;
2387 break;
2391 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2392 if ((*eax & 31) && cs->nr_cores > 1) {
2393 *eax |= (cs->nr_cores - 1) << 26;
2395 break;
2396 case 5:
2397 /* mwait info: needed for Core compatibility */
2398 *eax = 0; /* Smallest monitor-line size in bytes */
2399 *ebx = 0; /* Largest monitor-line size in bytes */
2400 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2401 *edx = 0;
2402 break;
2403 case 6:
2404 /* Thermal and Power Leaf */
2405 *eax = env->features[FEAT_6_EAX];
2406 *ebx = 0;
2407 *ecx = 0;
2408 *edx = 0;
2409 break;
2410 case 7:
2411 /* Structured Extended Feature Flags Enumeration Leaf */
2412 if (count == 0) {
2413 *eax = 0; /* Maximum ECX value for sub-leaves */
2414 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2415 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2416 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2417 *ecx |= CPUID_7_0_ECX_OSPKE;
2419 *edx = 0; /* Reserved */
2420 } else {
2421 *eax = 0;
2422 *ebx = 0;
2423 *ecx = 0;
2424 *edx = 0;
2426 break;
2427 case 9:
2428 /* Direct Cache Access Information Leaf */
2429 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2430 *ebx = 0;
2431 *ecx = 0;
2432 *edx = 0;
2433 break;
2434 case 0xA:
2435 /* Architectural Performance Monitoring Leaf */
2436 if (kvm_enabled() && cpu->enable_pmu) {
2437 KVMState *s = cs->kvm_state;
2439 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2440 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2441 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2442 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2443 } else {
2444 *eax = 0;
2445 *ebx = 0;
2446 *ecx = 0;
2447 *edx = 0;
2449 break;
2450 case 0xB:
2451 /* Extended Topology Enumeration Leaf */
2452 if (!cpu->enable_cpuid_0xb) {
2453 *eax = *ebx = *ecx = *edx = 0;
2454 break;
2457 *ecx = count & 0xff;
2458 *edx = cpu->apic_id;
2460 switch (count) {
2461 case 0:
2462 *eax = apicid_core_offset(smp_cores, smp_threads);
2463 *ebx = smp_threads;
2464 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2465 break;
2466 case 1:
2467 *eax = apicid_pkg_offset(smp_cores, smp_threads);
2468 *ebx = smp_cores * smp_threads;
2469 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2470 break;
2471 default:
2472 *eax = 0;
2473 *ebx = 0;
2474 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2477 assert(!(*eax & ~0x1f));
2478 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2479 break;
2480 case 0xD: {
2481 KVMState *s = cs->kvm_state;
2482 uint64_t ena_mask;
2483 int i;
2485 /* Processor Extended State */
2486 *eax = 0;
2487 *ebx = 0;
2488 *ecx = 0;
2489 *edx = 0;
2490 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2491 break;
2493 if (kvm_enabled()) {
2494 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2495 ena_mask <<= 32;
2496 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2497 } else {
2498 ena_mask = -1;
2501 if (count == 0) {
2502 *ecx = 0x240;
2503 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2504 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2505 if ((env->features[esa->feature] & esa->bits) == esa->bits
2506 && ((ena_mask >> i) & 1) != 0) {
2507 if (i < 32) {
2508 *eax |= 1u << i;
2509 } else {
2510 *edx |= 1u << (i - 32);
2512 *ecx = MAX(*ecx, esa->offset + esa->size);
2515 *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2516 *ebx = *ecx;
2517 } else if (count == 1) {
2518 *eax = env->features[FEAT_XSAVE];
2519 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2520 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2521 if ((env->features[esa->feature] & esa->bits) == esa->bits
2522 && ((ena_mask >> count) & 1) != 0) {
2523 *eax = esa->size;
2524 *ebx = esa->offset;
2527 break;
2529 case 0x80000000:
2530 *eax = env->cpuid_xlevel;
2531 *ebx = env->cpuid_vendor1;
2532 *edx = env->cpuid_vendor2;
2533 *ecx = env->cpuid_vendor3;
2534 break;
2535 case 0x80000001:
2536 *eax = env->cpuid_version;
2537 *ebx = 0;
2538 *ecx = env->features[FEAT_8000_0001_ECX];
2539 *edx = env->features[FEAT_8000_0001_EDX];
2541 /* The Linux kernel checks for the CMPLegacy bit and
2542 * discards multiple thread information if it is set.
2543 * So don't set it here for Intel to make Linux guests happy. */
2545 if (cs->nr_cores * cs->nr_threads > 1) {
2546 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2547 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2548 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2549 *ecx |= 1 << 1; /* CmpLegacy bit */
2552 break;
2553 case 0x80000002:
2554 case 0x80000003:
2555 case 0x80000004:
2556 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2557 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2558 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2559 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2560 break;
2561 case 0x80000005:
2562 /* cache info (L1 cache) */
2563 if (cpu->cache_info_passthrough) {
2564 host_cpuid(index, 0, eax, ebx, ecx, edx);
2565 break;
2567 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2568 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2569 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2570 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2571 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2572 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2573 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2574 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2575 break;
2576 case 0x80000006:
2577 /* cache info (L2 cache) */
2578 if (cpu->cache_info_passthrough) {
2579 host_cpuid(index, 0, eax, ebx, ecx, edx);
2580 break;
2582 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2583 (L2_DTLB_2M_ENTRIES << 16) | \
2584 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2585 (L2_ITLB_2M_ENTRIES);
2586 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2587 (L2_DTLB_4K_ENTRIES << 16) | \
2588 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2589 (L2_ITLB_4K_ENTRIES);
2590 *ecx = (L2_SIZE_KB_AMD << 16) | \
2591 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2592 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2593 *edx = ((L3_SIZE_KB/512) << 18) | \
2594 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2595 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2596 break;
2597 case 0x80000007:
2598 *eax = 0;
2599 *ebx = 0;
2600 *ecx = 0;
2601 *edx = env->features[FEAT_8000_0007_EDX];
2602 break;
2603 case 0x80000008:
2604 /* virtual & phys address size in low 2 bytes. */
2605 /* XXX: This value must match the one used in the MMU code. */
2606 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2607 /* 64 bit processor */
2608 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2609 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2610 } else {
2611 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2612 *eax = 0x00000024; /* 36 bits physical */
2613 } else {
2614 *eax = 0x00000020; /* 32 bits physical */
2617 *ebx = 0;
2618 *ecx = 0;
2619 *edx = 0;
2620 if (cs->nr_cores * cs->nr_threads > 1) {
2621 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2623 break;
2624 case 0x8000000A:
2625 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2626 *eax = 0x00000001; /* SVM Revision */
2627 *ebx = 0x00000010; /* nr of ASIDs */
2628 *ecx = 0;
2629 *edx = env->features[FEAT_SVM]; /* optional features */
2630 } else {
2631 *eax = 0;
2632 *ebx = 0;
2633 *ecx = 0;
2634 *edx = 0;
2636 break;
2637 case 0xC0000000:
2638 *eax = env->cpuid_xlevel2;
2639 *ebx = 0;
2640 *ecx = 0;
2641 *edx = 0;
2642 break;
2643 case 0xC0000001:
2644 /* Support for VIA CPU's CPUID instruction */
2645 *eax = env->cpuid_version;
2646 *ebx = 0;
2647 *ecx = 0;
2648 *edx = env->features[FEAT_C000_0001_EDX];
2649 break;
2650 case 0xC0000002:
2651 case 0xC0000003:
2652 case 0xC0000004:
2653 /* Reserved for the future, and now filled with zero */
2654 *eax = 0;
2655 *ebx = 0;
2656 *ecx = 0;
2657 *edx = 0;
2658 break;
2659 default:
2660 /* reserved values: zero */
2661 *eax = 0;
2662 *ebx = 0;
2663 *ecx = 0;
2664 *edx = 0;
2665 break;
2669 /* CPUClass::reset() */
2670 static void x86_cpu_reset(CPUState *s)
2672 X86CPU *cpu = X86_CPU(s);
2673 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2674 CPUX86State *env = &cpu->env;
2675 target_ulong cr4;
2676 uint64_t xcr0;
2677 int i;
2679 xcc->parent_reset(s);
2681 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2683 tlb_flush(s, 1);
2685 env->old_exception = -1;
2687 /* init to reset state */
2689 #ifdef CONFIG_SOFTMMU
2690 env->hflags |= HF_SOFTMMU_MASK;
2691 #endif
2692 env->hflags2 |= HF2_GIF_MASK;
2694 cpu_x86_update_cr0(env, 0x60000010);
2695 env->a20_mask = ~0x0;
2696 env->smbase = 0x30000;
2698 env->idt.limit = 0xffff;
2699 env->gdt.limit = 0xffff;
2700 env->ldt.limit = 0xffff;
2701 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2702 env->tr.limit = 0xffff;
2703 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2705 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2706 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2707 DESC_R_MASK | DESC_A_MASK);
2708 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2709 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2710 DESC_A_MASK);
2711 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2712 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2713 DESC_A_MASK);
2714 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2715 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2716 DESC_A_MASK);
2717 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2718 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2719 DESC_A_MASK);
2720 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2721 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2722 DESC_A_MASK);
2724 env->eip = 0xfff0;
2725 env->regs[R_EDX] = env->cpuid_version;
2727 env->eflags = 0x2;
2729 /* FPU init */
2730 for (i = 0; i < 8; i++) {
2731 env->fptags[i] = 1;
2733 cpu_set_fpuc(env, 0x37f);
2735 env->mxcsr = 0x1f80;
2736 /* All units are in INIT state. */
2737 env->xstate_bv = 0;
2739 env->pat = 0x0007040600070406ULL;
2740 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2742 memset(env->dr, 0, sizeof(env->dr));
2743 env->dr[6] = DR6_FIXED_1;
2744 env->dr[7] = DR7_FIXED_1;
2745 cpu_breakpoint_remove_all(s, BP_CPU);
2746 cpu_watchpoint_remove_all(s, BP_CPU);
2748 cr4 = 0;
2749 xcr0 = XSTATE_FP_MASK;
2751 #ifdef CONFIG_USER_ONLY
2752 /* Enable all the features for user-mode. */
2753 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2754 xcr0 |= XSTATE_SSE_MASK;
2756 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2757 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2758 if ((env->features[esa->feature] & esa->bits) == esa->bits) {
2759 xcr0 |= 1ull << i;
2763 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2764 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2766 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2767 cr4 |= CR4_FSGSBASE_MASK;
2769 #endif
2771 env->xcr0 = xcr0;
2772 cpu_x86_update_cr4(env, cr4);
2775 /* SDM 11.11.5 requires:
2776  *  - IA32_MTRR_DEF_TYPE MSR.E = 0
2777  *  - IA32_MTRR_PHYSMASKn.V = 0
2778  * All other bits are undefined.  For simplification, zero it all. */
2780 env->mtrr_deftype = 0;
2781 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2782 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2784 #if !defined(CONFIG_USER_ONLY)
2785 /* We hard-wire the BSP to the first CPU. */
2786 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2788 s->halted = !cpu_is_bsp(cpu);
2790 if (kvm_enabled()) {
2791 kvm_arch_reset_vcpu(cpu);
2793 #endif
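/*
 * After reset the CPU is in the architectural power-on state: CS selector
 * 0xf000 with base 0xffff0000 and EIP 0xfff0, so the first instruction is
 * fetched from physical address 0xfffffff0; EDX holds cpuid_version and
 * EFLAGS is 0x2, matching the values programmed above.
 */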
2796 #ifndef CONFIG_USER_ONLY
2797 bool cpu_is_bsp(X86CPU *cpu)
2799 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2802 /* TODO: remove me when reset over the QOM tree is implemented */
2803 static void x86_cpu_machine_reset_cb(void *opaque)
2805 X86CPU *cpu = opaque;
2806 cpu_reset(CPU(cpu));
2808 #endif
2810 static void mce_init(X86CPU *cpu)
2812 CPUX86State *cenv = &cpu->env;
2813 unsigned int bank;
2815 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2816 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2817 (CPUID_MCE | CPUID_MCA)) {
2818 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2819 cenv->mcg_ctl = ~(uint64_t)0;
2820 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2821 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2826 #ifndef CONFIG_USER_ONLY
2827 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2829 APICCommonState *apic;
2830 const char *apic_type = "apic";
2832 if (kvm_apic_in_kernel()) {
2833 apic_type = "kvm-apic";
2834 } else if (xen_enabled()) {
2835 apic_type = "xen-apic";
2838 cpu->apic_state = DEVICE(object_new(apic_type));
2840 object_property_add_child(OBJECT(cpu), "apic",
2841 OBJECT(cpu->apic_state), NULL);
2842 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2843 /* TODO: convert to link<> */
2844 apic = APIC_COMMON(cpu->apic_state);
2845 apic->cpu = cpu;
2846 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2849 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2851 APICCommonState *apic;
2852 static bool apic_mmio_map_once;
2854 if (cpu->apic_state == NULL) {
2855 return;
2857 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2858 errp);
2860 /* Map APIC MMIO area */
2861 apic = APIC_COMMON(cpu->apic_state);
2862 if (!apic_mmio_map_once) {
2863 memory_region_add_subregion_overlap(get_system_memory(),
2864 apic->apicbase &
2865 MSR_IA32_APICBASE_BASE,
2866 &apic->io_memory,
2867 0x1000);
2868 apic_mmio_map_once = true;
2872 static void x86_cpu_machine_done(Notifier *n, void *unused)
2874 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2875 MemoryRegion *smram =
2876 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2878 if (smram) {
2879 cpu->smram = g_new(MemoryRegion, 1);
2880 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2881 smram, 0, 1ull << 32);
2882 memory_region_set_enabled(cpu->smram, false);
2883 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2886 #else
2887 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2890 #endif
2893 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2894 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2895 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2896 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2897 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2898 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2899 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2901 CPUState *cs = CPU(dev);
2902 X86CPU *cpu = X86_CPU(dev);
2903 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2904 CPUX86State *env = &cpu->env;
2905 Error *local_err = NULL;
2906 static bool ht_warned;
2907 FeatureWord w;
2909 if (xcc->kvm_required && !kvm_enabled()) {
2910 char *name = x86_cpu_class_get_model_name(xcc);
2911 error_setg(&local_err, "CPU model '%s' requires KVM", name);
2912 g_free(name);
2913 goto out;
2916 if (cpu->apic_id < 0) {
2917 error_setg(errp, "apic-id property was not initialized properly");
2918 return;
2921 /* TODO: cpu->host_features incorrectly overwrites features
2922  * set using "feat=on|off". Once we fix this, we can convert
2923  * plus_features & minus_features to global properties
2924  * inside x86_cpu_parse_featurestr() too. */
2926 if (cpu->host_features) {
2927 for (w = 0; w < FEATURE_WORDS; w++) {
2928 env->features[w] =
2929 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2933 for (w = 0; w < FEATURE_WORDS; w++) {
2934 cpu->env.features[w] |= plus_features[w];
2935 cpu->env.features[w] &= ~minus_features[w];
2938 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2939 env->cpuid_level = 7;
2942 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2943 error_setg(&local_err,
2944 kvm_enabled() ?
2945 "Host doesn't support requested features" :
2946 "TCG doesn't support requested features");
2947 goto out;
2950 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2951 * CPUID[1].EDX.
2953 if (IS_AMD_CPU(env)) {
2954 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2955 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2956 & CPUID_EXT2_AMD_ALIASES);
2960 cpu_exec_init(cs, &error_abort);
2962 if (tcg_enabled()) {
2963 tcg_x86_init();
2966 #ifndef CONFIG_USER_ONLY
2967 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2969 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2970 x86_cpu_apic_create(cpu, &local_err);
2971 if (local_err != NULL) {
2972 goto out;
2975 #endif
2977 mce_init(cpu);
2979 #ifndef CONFIG_USER_ONLY
2980 if (tcg_enabled()) {
2981 AddressSpace *newas = g_new(AddressSpace, 1);
2983 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2984 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2986 /* Outer container... */
2987 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2988 memory_region_set_enabled(cpu->cpu_as_root, true);
2990 /* ... with two regions inside: normal system memory with low
2991 * priority, and... */
2993 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2994 get_system_memory(), 0, ~0ull);
2995 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2996 memory_region_set_enabled(cpu->cpu_as_mem, true);
2997 address_space_init(newas, cpu->cpu_as_root, "CPU");
2998 cs->num_ases = 1;
2999 cpu_address_space_init(cs, newas, 0);
3001 /* ... SMRAM with higher priority, linked from /machine/smram. */
3002 cpu->machine_done.notify = x86_cpu_machine_done;
3003 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3005 #endif
3007 qemu_init_vcpu(cs);
3009 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3010  * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3011  * based on inputs (sockets,cores,threads), it is still better to give
3012  * users a warning.
3014  * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3015  * cs->nr_threads hasn't been populated yet and the check is incorrect. */
3017 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3018 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3019 " -smp options properly.");
3020 ht_warned = true;
3023 x86_cpu_apic_realize(cpu, &local_err);
3024 if (local_err != NULL) {
3025 goto out;
3027 cpu_reset(cs);
3029 xcc->parent_realize(dev, &local_err);
3031 out:
3032 if (local_err != NULL) {
3033 error_propagate(errp, local_err);
3034 return;
3038 typedef struct BitProperty {
3039 uint32_t *ptr;
3040 uint32_t mask;
3041 } BitProperty;
3043 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3044 void *opaque, Error **errp)
3046 BitProperty *fp = opaque;
3047 bool value = (*fp->ptr & fp->mask) == fp->mask;
3048 visit_type_bool(v, name, &value, errp);
3051 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3052 void *opaque, Error **errp)
3054 DeviceState *dev = DEVICE(obj);
3055 BitProperty *fp = opaque;
3056 Error *local_err = NULL;
3057 bool value;
3059 if (dev->realized) {
3060 qdev_prop_set_after_realize(dev, name, errp);
3061 return;
3064 visit_type_bool(v, name, &value, &local_err);
3065 if (local_err) {
3066 error_propagate(errp, local_err);
3067 return;
3070 if (value) {
3071 *fp->ptr |= fp->mask;
3072 } else {
3073 *fp->ptr &= ~fp->mask;
3077 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3078 void *opaque)
3080 BitProperty *prop = opaque;
3081 g_free(prop);
3084 /* Register a boolean property to get/set a single bit in a uint32_t field.
3086 * The same property name can be registered multiple times to make it affect
3087 * multiple bits in the same FeatureWord. In that case, the getter will return
3088 * true only if all bits are set. */
3090 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3091 const char *prop_name,
3092 uint32_t *field,
3093 int bitnr)
3095 BitProperty *fp;
3096 ObjectProperty *op;
3097 uint32_t mask = (1UL << bitnr);
3099 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3100 if (op) {
3101 fp = op->opaque;
3102 assert(fp->ptr == field);
3103 fp->mask |= mask;
3104 } else {
3105 fp = g_new0(BitProperty, 1);
3106 fp->ptr = field;
3107 fp->mask = mask;
3108 object_property_add(OBJECT(cpu), prop_name, "bool",
3109 x86_cpu_get_bit_prop,
3110 x86_cpu_set_bit_prop,
3111 x86_cpu_release_bit_prop, fp, &error_abort);
3115 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3116 FeatureWord w,
3117 int bitnr)
3119 Object *obj = OBJECT(cpu);
3120 int i;
3121 char **names;
3122 FeatureWordInfo *fi = &feature_word_info[w];
3124 if (!fi->feat_names) {
3125 return;
3127 if (!fi->feat_names[bitnr]) {
3128 return;
3131 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3133 feat2prop(names[0]);
3134 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3136 for (i = 1; names[i]; i++) {
3137 feat2prop(names[i]);
3138 object_property_add_alias(obj, names[i], obj, names[0],
3139 &error_abort);
3142 g_strfreev(names);
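/*
 * A feat_names entry may list several '|'-separated spellings; the first
 * becomes the canonical boolean property and the remaining ones are
 * registered as aliases, with underscores rewritten to dashes.  For example
 * (illustrative spelling), an entry "lahf_lm" yields a "lahf-lm" property.
 */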
3145 static void x86_cpu_initfn(Object *obj)
3147 CPUState *cs = CPU(obj);
3148 X86CPU *cpu = X86_CPU(obj);
3149 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3150 CPUX86State *env = &cpu->env;
3151 FeatureWord w;
3153 cs->env_ptr = env;
3155 object_property_add(obj, "family", "int",
3156 x86_cpuid_version_get_family,
3157 x86_cpuid_version_set_family, NULL, NULL, NULL);
3158 object_property_add(obj, "model", "int",
3159 x86_cpuid_version_get_model,
3160 x86_cpuid_version_set_model, NULL, NULL, NULL);
3161 object_property_add(obj, "stepping", "int",
3162 x86_cpuid_version_get_stepping,
3163 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3164 object_property_add_str(obj, "vendor",
3165 x86_cpuid_get_vendor,
3166 x86_cpuid_set_vendor, NULL);
3167 object_property_add_str(obj, "model-id",
3168 x86_cpuid_get_model_id,
3169 x86_cpuid_set_model_id, NULL);
3170 object_property_add(obj, "tsc-frequency", "int",
3171 x86_cpuid_get_tsc_freq,
3172 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3173 object_property_add(obj, "apic-id", "int",
3174 x86_cpuid_get_apic_id,
3175 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3176 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3177 x86_cpu_get_feature_words,
3178 NULL, NULL, (void *)env->features, NULL);
3179 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3180 x86_cpu_get_feature_words,
3181 NULL, NULL, (void *)cpu->filtered_features, NULL);
3183 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3185 #ifndef CONFIG_USER_ONLY
3186 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3187 cpu->apic_id = -1;
3188 #endif
3190 for (w = 0; w < FEATURE_WORDS; w++) {
3191 int bitnr;
3193 for (bitnr = 0; bitnr < 32; bitnr++) {
3194 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3198 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3201 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3203 X86CPU *cpu = X86_CPU(cs);
3205 return cpu->apic_id;
3208 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3210 X86CPU *cpu = X86_CPU(cs);
3212 return cpu->env.cr[0] & CR0_PG_MASK;
3215 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3217 X86CPU *cpu = X86_CPU(cs);
3219 cpu->env.eip = value;
3222 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3224 X86CPU *cpu = X86_CPU(cs);
3226 cpu->env.eip = tb->pc - tb->cs_base;
3229 static bool x86_cpu_has_work(CPUState *cs)
3231 X86CPU *cpu = X86_CPU(cs);
3232 CPUX86State *env = &cpu->env;
3234 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3235 CPU_INTERRUPT_POLL)) &&
3236 (env->eflags & IF_MASK)) ||
3237 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3238 CPU_INTERRUPT_INIT |
3239 CPU_INTERRUPT_SIPI |
3240 CPU_INTERRUPT_MCE)) ||
3241 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3242 !(env->hflags & HF_SMM_MASK));
3245 static Property x86_cpu_properties[] = {
3246 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3247 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3248 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3249 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3250 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3251 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3252 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3253 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3254 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3255 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3256 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3257 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3258 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3259 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3260 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3261 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3262 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3263 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3264 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
3265 DEFINE_PROP_END_OF_LIST()
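/*
 * These qdev properties surface as -cpu options alongside the QOM
 * properties registered in x86_cpu_initfn(), e.g. (illustrative):
 *     -cpu <model>,pmu=on,hv-relaxed=on
 */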
3268 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3270 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3271 CPUClass *cc = CPU_CLASS(oc);
3272 DeviceClass *dc = DEVICE_CLASS(oc);
3274 xcc->parent_realize = dc->realize;
3275 dc->realize = x86_cpu_realizefn;
3276 dc->props = x86_cpu_properties;
3278 xcc->parent_reset = cc->reset;
3279 cc->reset = x86_cpu_reset;
3280 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3282 cc->class_by_name = x86_cpu_class_by_name;
3283 cc->parse_features = x86_cpu_parse_featurestr;
3284 cc->has_work = x86_cpu_has_work;
3285 cc->do_interrupt = x86_cpu_do_interrupt;
3286 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3287 cc->dump_state = x86_cpu_dump_state;
3288 cc->set_pc = x86_cpu_set_pc;
3289 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3290 cc->gdb_read_register = x86_cpu_gdb_read_register;
3291 cc->gdb_write_register = x86_cpu_gdb_write_register;
3292 cc->get_arch_id = x86_cpu_get_arch_id;
3293 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3294 #ifdef CONFIG_USER_ONLY
3295 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3296 #else
3297 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3298 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3299 cc->write_elf64_note = x86_cpu_write_elf64_note;
3300 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3301 cc->write_elf32_note = x86_cpu_write_elf32_note;
3302 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3303 cc->vmsd = &vmstate_x86_cpu;
3304 #endif
3305 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3306 #ifndef CONFIG_USER_ONLY
3307 cc->debug_excp_handler = breakpoint_handler;
3308 #endif
3309 cc->cpu_exec_enter = x86_cpu_exec_enter;
3310 cc->cpu_exec_exit = x86_cpu_exec_exit;
3313 /* Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3314  * object in cpus -> dangling pointer after final object_unref(). */
3316 dc->cannot_destroy_with_object_finalize_yet = true;
3319 static const TypeInfo x86_cpu_type_info = {
3320 .name = TYPE_X86_CPU,
3321 .parent = TYPE_CPU,
3322 .instance_size = sizeof(X86CPU),
3323 .instance_init = x86_cpu_initfn,
3324 .abstract = true,
3325 .class_size = sizeof(X86CPUClass),
3326 .class_init = x86_cpu_common_class_init,
3329 static void x86_cpu_register_types(void)
3331 int i;
3333 type_register_static(&x86_cpu_type_info);
3334 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3335 x86_register_cpudef_type(&builtin_x86_defs[i]);
3337 #ifdef CONFIG_KVM
3338 type_register_static(&host_x86_cpu_type_info);
3339 #endif
3342 type_init(x86_cpu_register_types)