target-i386: Move xcc->kvm_required check to realize time
[qemu.git] / target-i386 / cpu.c
blob7db632b8ddf84ac78d28eb37e7cabdd2891e5cf7
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d


/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

#define CPUID_4_LEVEL(l)          ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * NOTE: 'a' is evaluated multiple times; use only with side-effect-free
 * arguments (all current callers pass constants). */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)


/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* No L3 cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
173 dst[CPUID_VENDOR_SZ] = '\0';
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
/* CPUID[1].EDX feature names; array index == bit number.  NULL entries are
 * bits QEMU has no name for (and thus does not expose on the command line). */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* CPUID[1].ECX feature names; "a|b" entries are accepted aliases for the
 * same bit (see altcmp()). */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};
/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};
/* CPUID[8000_0001].ECX (AMD extended features) names. */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[C000_0001].EDX (VIA/Centaur PadLock) feature names. */
static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* KVM paravirtual feature (CPUID[KVM_CPUID_FEATURES].EAX) names.
 * "kvmclock" appears twice on purpose: bits 0 and 3 are the old and new
 * kvmclock MSR interfaces, and the user-visible flag enables both. */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* SVM nested-virtualization feature (CPUID[8000_000A].EDX) names. */
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean",  "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[EAX=7,ECX=0].EBX (structured extended features) names. */
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
    "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
    "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};

/* CPUID[EAX=7,ECX=0].ECX feature names (protection keys). */
static const char *cpuid_7_0_ecx_feature_name[] = {
    NULL, NULL, NULL, "pku",
    "ospke", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[8000_0007].EDX (Advanced Power Management) feature names. */
static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[EAX=0xD,ECX=1].EAX (XSAVE sub-feature) names. */
static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[6].EAX (thermal/power management) feature names. */
static const char *cpuid_6_feature_name[] = {
    NULL, NULL, "arat", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Convenience feature sets for the built-in CPU models: */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* Feature bits the TCG emulator can actually implement, per feature word.
 * check_features_against_host()-style filtering uses these to warn about or
 * strip bits that would be advertised but not emulated. */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Per-feature-word metadata: name table for flag parsing, the CPUID leaf
 * (EAX/ECX inputs and output register) that reports the word, and which bits
 * TCG supports or cannot be migrated. */
typedef struct FeatureWordInfo {
    const char **feat_names;    /* bit-indexed flag names (NULL = unnamed) */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
/* Master table describing every FeatureWord QEMU knows about. */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = ext_feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    [FEAT_8000_0001_EDX] = {
        .feat_names = ext2_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = ext3_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = ext4_feature_name,
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = kvm_feature_name,
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_SVM] = {
        .feat_names = svm_feature_name,
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = cpuid_7_0_ebx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = cpuid_7_0_ecx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = cpuid_apm_edx_feature_name,
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        /* invtsc cannot be migrated: TSC frequency differs across hosts */
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = cpuid_xsave_feature_name,
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = cpuid_6_feature_name,
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
};
/* Name and QAPI enum value for each 32-bit GPR, indexed by R_* constant. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
/* XSAVE state components: for each XSTATE_*_BIT, the CPUID feature flag that
 * enables it and the offset/size of its save area inside X86XSaveArea. */
const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
506 const char *get_register_name_32(unsigned int reg)
508 if (reg >= CPU_NB_REGS32) {
509 return NULL;
511 return x86_reg_info_32[reg].name;
515 * Returns the set of feature flags that are supported and migratable by
516 * QEMU, for a given FeatureWord.
518 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
520 FeatureWordInfo *wi = &feature_word_info[w];
521 uint32_t r = 0;
522 int i;
524 for (i = 0; i < 32; i++) {
525 uint32_t f = 1U << i;
526 /* If the feature name is unknown, it is not supported by QEMU yet */
527 if (!wi->feat_names[i]) {
528 continue;
530 /* Skip features known to QEMU, but explicitly marked as unmigratable */
531 if (wi->unmigratable_flags & f) {
532 continue;
534 r |= f;
536 return r;
/* Execute the CPUID instruction on the host with the given EAX/ECX inputs
 * and store the resulting EAX..EDX through the output pointers (any of which
 * may be NULL to discard that register).  Only valid on x86 hosts; aborts
 * otherwise. */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* NOTE(review): saves/restores all registers with pusha/popa and writes
     * results through the vec pointer rather than listing EBX as an output —
     * presumably because EBX can be reserved as the PIC base register on
     * i386; confirm before changing the constraint list. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
/* True for any nonzero char outside printable ASCII (control chars, space,
 * or above '~').  NOTE: broader than literal whitespace despite the name. */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))

/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    /* Walk both strings while characters match and neither hits NUL */
    while (*s1 && *s2 && *s1 == *s2) {
        ++s1;
        ++s2;
        if (s1 == e1 && s2 == e2) {
            return 0;           /* both substrings exhausted together */
        }
        if (s1 == e1) {
            return *s2;         /* s1 ended first: order by s2's next char */
        }
        if (s2 == e2) {
            return *s1;         /* s2 ended first: order by s1's next char */
        }
    }
    return *s1 - *s2;           /* mismatch or NUL reached */
}
/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right. Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *start = altstr;
    const char *end = altstr;

    for (;;) {
        /* advance 'end' to the '|' or NUL terminating this alternative */
        while (*end && *end != '|') {
            ++end;
        }
        if (start == end) {
            /* empty alternative matches only an empty flag name */
            if (!*s) {
                return 0;
            }
        } else if (!sstrcmp(s, e, start, end)) {
            return 0;
        }
        if (!*end) {
            return 1;       /* no more alternatives: no match */
        }
        start = ++end;      /* skip '|' and try the next alternative */
    }
}
617 /* search featureset for flag *[s..e), if found set corresponding bit in
618 * *pval and return true, otherwise return false
620 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
621 const char **featureset)
623 uint32_t mask;
624 const char **ppc;
625 bool found = false;
627 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
628 if (*ppc && !altcmp(s, e, *ppc)) {
629 *pval |= mask;
630 found = true;
633 return found;
636 static void add_flagname_to_bitmaps(const char *flagname,
637 FeatureWordArray words,
638 Error **errp)
640 FeatureWord w;
641 for (w = 0; w < FEATURE_WORDS; w++) {
642 FeatureWordInfo *wi = &feature_word_info[w];
643 if (wi->feat_names &&
644 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
645 break;
648 if (w == FEATURE_WORDS) {
649 error_setg(errp, "CPU feature %s not found", flagname);
/* CPU class name definitions: */

/* QOM type name for CPU model "foo" is "foo-" TYPE_X86_CPU */
#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
666 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
668 ObjectClass *oc;
669 char *typename;
671 if (cpu_model == NULL) {
672 return NULL;
675 typename = x86_cpu_type_name(cpu_model);
676 oc = object_class_by_name(typename);
677 g_free(typename);
678 return oc;
681 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
683 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
684 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
685 return g_strndup(class_name,
686 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
/* Static definition of one built-in CPU model (entries of builtin_x86_defs) */
struct X86CPUDefinition {
    const char *name;   /* model name, as used with -cpu */
    uint32_t level;     /* maximum basic CPUID level (CPUID[0].EAX) */
    uint32_t xlevel;    /* maximum extended CPUID level (CPUID[0x80000000].EAX) */
    uint32_t xlevel2;   /* presumably the max Centaur (0xC0000000) CPUID level
                         * — cf. FEAT_C000_0001_EDX above; confirm in cpu.h */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;  /* default feature bits per FeatureWord */
    char model_id[48];          /* brand string (CPUID[0x80000002..4]) */
};
703 static X86CPUDefinition builtin_x86_defs[] = {
705 .name = "qemu64",
706 .level = 0xd,
707 .vendor = CPUID_VENDOR_AMD,
708 .family = 6,
709 .model = 6,
710 .stepping = 3,
711 .features[FEAT_1_EDX] =
712 PPRO_FEATURES |
713 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
714 CPUID_PSE36,
715 .features[FEAT_1_ECX] =
716 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
717 .features[FEAT_8000_0001_EDX] =
718 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
719 .features[FEAT_8000_0001_ECX] =
720 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
721 .xlevel = 0x8000000A,
722 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
725 .name = "phenom",
726 .level = 5,
727 .vendor = CPUID_VENDOR_AMD,
728 .family = 16,
729 .model = 2,
730 .stepping = 3,
731 /* Missing: CPUID_HT */
732 .features[FEAT_1_EDX] =
733 PPRO_FEATURES |
734 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
735 CPUID_PSE36 | CPUID_VME,
736 .features[FEAT_1_ECX] =
737 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
738 CPUID_EXT_POPCNT,
739 .features[FEAT_8000_0001_EDX] =
740 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
741 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
742 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
743 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
744 CPUID_EXT3_CR8LEG,
745 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
746 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
747 .features[FEAT_8000_0001_ECX] =
748 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
749 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
750 /* Missing: CPUID_SVM_LBRV */
751 .features[FEAT_SVM] =
752 CPUID_SVM_NPT,
753 .xlevel = 0x8000001A,
754 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
757 .name = "core2duo",
758 .level = 10,
759 .vendor = CPUID_VENDOR_INTEL,
760 .family = 6,
761 .model = 15,
762 .stepping = 11,
763 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
764 .features[FEAT_1_EDX] =
765 PPRO_FEATURES |
766 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
767 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
768 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
769 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
770 .features[FEAT_1_ECX] =
771 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
772 CPUID_EXT_CX16,
773 .features[FEAT_8000_0001_EDX] =
774 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
775 .features[FEAT_8000_0001_ECX] =
776 CPUID_EXT3_LAHF_LM,
777 .xlevel = 0x80000008,
778 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
781 .name = "kvm64",
782 .level = 0xd,
783 .vendor = CPUID_VENDOR_INTEL,
784 .family = 15,
785 .model = 6,
786 .stepping = 1,
787 /* Missing: CPUID_HT */
788 .features[FEAT_1_EDX] =
789 PPRO_FEATURES | CPUID_VME |
790 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
791 CPUID_PSE36,
792 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
793 .features[FEAT_1_ECX] =
794 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
795 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
796 .features[FEAT_8000_0001_EDX] =
797 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
798 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
799 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
800 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
801 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
802 .features[FEAT_8000_0001_ECX] =
804 .xlevel = 0x80000008,
805 .model_id = "Common KVM processor"
808 .name = "qemu32",
809 .level = 4,
810 .vendor = CPUID_VENDOR_INTEL,
811 .family = 6,
812 .model = 6,
813 .stepping = 3,
814 .features[FEAT_1_EDX] =
815 PPRO_FEATURES,
816 .features[FEAT_1_ECX] =
817 CPUID_EXT_SSE3,
818 .xlevel = 0x80000004,
819 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
822 .name = "kvm32",
823 .level = 5,
824 .vendor = CPUID_VENDOR_INTEL,
825 .family = 15,
826 .model = 6,
827 .stepping = 1,
828 .features[FEAT_1_EDX] =
829 PPRO_FEATURES | CPUID_VME |
830 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
831 .features[FEAT_1_ECX] =
832 CPUID_EXT_SSE3,
833 .features[FEAT_8000_0001_ECX] =
835 .xlevel = 0x80000008,
836 .model_id = "Common 32-bit KVM processor"
839 .name = "coreduo",
840 .level = 10,
841 .vendor = CPUID_VENDOR_INTEL,
842 .family = 6,
843 .model = 14,
844 .stepping = 8,
845 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
846 .features[FEAT_1_EDX] =
847 PPRO_FEATURES | CPUID_VME |
848 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
849 CPUID_SS,
850 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
851 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
852 .features[FEAT_1_ECX] =
853 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
854 .features[FEAT_8000_0001_EDX] =
855 CPUID_EXT2_NX,
856 .xlevel = 0x80000008,
857 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
860 .name = "486",
861 .level = 1,
862 .vendor = CPUID_VENDOR_INTEL,
863 .family = 4,
864 .model = 8,
865 .stepping = 0,
866 .features[FEAT_1_EDX] =
867 I486_FEATURES,
868 .xlevel = 0,
871 .name = "pentium",
872 .level = 1,
873 .vendor = CPUID_VENDOR_INTEL,
874 .family = 5,
875 .model = 4,
876 .stepping = 3,
877 .features[FEAT_1_EDX] =
878 PENTIUM_FEATURES,
879 .xlevel = 0,
882 .name = "pentium2",
883 .level = 2,
884 .vendor = CPUID_VENDOR_INTEL,
885 .family = 6,
886 .model = 5,
887 .stepping = 2,
888 .features[FEAT_1_EDX] =
889 PENTIUM2_FEATURES,
890 .xlevel = 0,
893 .name = "pentium3",
894 .level = 3,
895 .vendor = CPUID_VENDOR_INTEL,
896 .family = 6,
897 .model = 7,
898 .stepping = 3,
899 .features[FEAT_1_EDX] =
900 PENTIUM3_FEATURES,
901 .xlevel = 0,
904 .name = "athlon",
905 .level = 2,
906 .vendor = CPUID_VENDOR_AMD,
907 .family = 6,
908 .model = 2,
909 .stepping = 3,
910 .features[FEAT_1_EDX] =
911 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
912 CPUID_MCA,
913 .features[FEAT_8000_0001_EDX] =
914 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
915 .xlevel = 0x80000008,
916 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
919 .name = "n270",
920 .level = 10,
921 .vendor = CPUID_VENDOR_INTEL,
922 .family = 6,
923 .model = 28,
924 .stepping = 2,
925 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
926 .features[FEAT_1_EDX] =
927 PPRO_FEATURES |
928 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
929 CPUID_ACPI | CPUID_SS,
930 /* Some CPUs got no CPUID_SEP */
931 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
932 * CPUID_EXT_XTPR */
933 .features[FEAT_1_ECX] =
934 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
935 CPUID_EXT_MOVBE,
936 .features[FEAT_8000_0001_EDX] =
937 CPUID_EXT2_NX,
938 .features[FEAT_8000_0001_ECX] =
939 CPUID_EXT3_LAHF_LM,
940 .xlevel = 0x80000008,
941 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
944 .name = "Conroe",
945 .level = 10,
946 .vendor = CPUID_VENDOR_INTEL,
947 .family = 6,
948 .model = 15,
949 .stepping = 3,
950 .features[FEAT_1_EDX] =
951 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
952 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
953 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
954 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
955 CPUID_DE | CPUID_FP87,
956 .features[FEAT_1_ECX] =
957 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
958 .features[FEAT_8000_0001_EDX] =
959 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
960 .features[FEAT_8000_0001_ECX] =
961 CPUID_EXT3_LAHF_LM,
962 .xlevel = 0x80000008,
963 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
966 .name = "Penryn",
967 .level = 10,
968 .vendor = CPUID_VENDOR_INTEL,
969 .family = 6,
970 .model = 23,
971 .stepping = 3,
972 .features[FEAT_1_EDX] =
973 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
974 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
975 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
976 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
977 CPUID_DE | CPUID_FP87,
978 .features[FEAT_1_ECX] =
979 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
980 CPUID_EXT_SSE3,
981 .features[FEAT_8000_0001_EDX] =
982 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
983 .features[FEAT_8000_0001_ECX] =
984 CPUID_EXT3_LAHF_LM,
985 .xlevel = 0x80000008,
986 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
989 .name = "Nehalem",
990 .level = 11,
991 .vendor = CPUID_VENDOR_INTEL,
992 .family = 6,
993 .model = 26,
994 .stepping = 3,
995 .features[FEAT_1_EDX] =
996 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
997 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
998 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
999 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1000 CPUID_DE | CPUID_FP87,
1001 .features[FEAT_1_ECX] =
1002 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1003 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1004 .features[FEAT_8000_0001_EDX] =
1005 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1006 .features[FEAT_8000_0001_ECX] =
1007 CPUID_EXT3_LAHF_LM,
1008 .xlevel = 0x80000008,
1009 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1012 .name = "Westmere",
1013 .level = 11,
1014 .vendor = CPUID_VENDOR_INTEL,
1015 .family = 6,
1016 .model = 44,
1017 .stepping = 1,
1018 .features[FEAT_1_EDX] =
1019 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1020 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1021 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1022 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1023 CPUID_DE | CPUID_FP87,
1024 .features[FEAT_1_ECX] =
1025 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1026 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1027 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1028 .features[FEAT_8000_0001_EDX] =
1029 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1030 .features[FEAT_8000_0001_ECX] =
1031 CPUID_EXT3_LAHF_LM,
1032 .features[FEAT_6_EAX] =
1033 CPUID_6_EAX_ARAT,
1034 .xlevel = 0x80000008,
1035 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1038 .name = "SandyBridge",
1039 .level = 0xd,
1040 .vendor = CPUID_VENDOR_INTEL,
1041 .family = 6,
1042 .model = 42,
1043 .stepping = 1,
1044 .features[FEAT_1_EDX] =
1045 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1046 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1047 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1048 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1049 CPUID_DE | CPUID_FP87,
1050 .features[FEAT_1_ECX] =
1051 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1052 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1053 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1054 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1055 CPUID_EXT_SSE3,
1056 .features[FEAT_8000_0001_EDX] =
1057 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1058 CPUID_EXT2_SYSCALL,
1059 .features[FEAT_8000_0001_ECX] =
1060 CPUID_EXT3_LAHF_LM,
1061 .features[FEAT_XSAVE] =
1062 CPUID_XSAVE_XSAVEOPT,
1063 .features[FEAT_6_EAX] =
1064 CPUID_6_EAX_ARAT,
1065 .xlevel = 0x80000008,
1066 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1069 .name = "IvyBridge",
1070 .level = 0xd,
1071 .vendor = CPUID_VENDOR_INTEL,
1072 .family = 6,
1073 .model = 58,
1074 .stepping = 9,
1075 .features[FEAT_1_EDX] =
1076 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1077 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1078 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1079 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1080 CPUID_DE | CPUID_FP87,
1081 .features[FEAT_1_ECX] =
1082 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1083 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1084 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1085 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1086 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1087 .features[FEAT_7_0_EBX] =
1088 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1089 CPUID_7_0_EBX_ERMS,
1090 .features[FEAT_8000_0001_EDX] =
1091 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1092 CPUID_EXT2_SYSCALL,
1093 .features[FEAT_8000_0001_ECX] =
1094 CPUID_EXT3_LAHF_LM,
1095 .features[FEAT_XSAVE] =
1096 CPUID_XSAVE_XSAVEOPT,
1097 .features[FEAT_6_EAX] =
1098 CPUID_6_EAX_ARAT,
1099 .xlevel = 0x80000008,
1100 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1103 .name = "Haswell-noTSX",
1104 .level = 0xd,
1105 .vendor = CPUID_VENDOR_INTEL,
1106 .family = 6,
1107 .model = 60,
1108 .stepping = 1,
1109 .features[FEAT_1_EDX] =
1110 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1111 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1112 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1113 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1114 CPUID_DE | CPUID_FP87,
1115 .features[FEAT_1_ECX] =
1116 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1117 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1118 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1119 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1120 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1121 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1122 .features[FEAT_8000_0001_EDX] =
1123 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1124 CPUID_EXT2_SYSCALL,
1125 .features[FEAT_8000_0001_ECX] =
1126 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1127 .features[FEAT_7_0_EBX] =
1128 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1129 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1130 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1131 .features[FEAT_XSAVE] =
1132 CPUID_XSAVE_XSAVEOPT,
1133 .features[FEAT_6_EAX] =
1134 CPUID_6_EAX_ARAT,
1135 .xlevel = 0x80000008,
1136 .model_id = "Intel Core Processor (Haswell, no TSX)",
1137 }, {
1138 .name = "Haswell",
1139 .level = 0xd,
1140 .vendor = CPUID_VENDOR_INTEL,
1141 .family = 6,
1142 .model = 60,
1143 .stepping = 1,
1144 .features[FEAT_1_EDX] =
1145 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1146 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1147 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1148 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1149 CPUID_DE | CPUID_FP87,
1150 .features[FEAT_1_ECX] =
1151 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1152 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1153 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1154 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1155 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1156 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1157 .features[FEAT_8000_0001_EDX] =
1158 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1159 CPUID_EXT2_SYSCALL,
1160 .features[FEAT_8000_0001_ECX] =
1161 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1162 .features[FEAT_7_0_EBX] =
1163 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1164 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1165 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1166 CPUID_7_0_EBX_RTM,
1167 .features[FEAT_XSAVE] =
1168 CPUID_XSAVE_XSAVEOPT,
1169 .features[FEAT_6_EAX] =
1170 CPUID_6_EAX_ARAT,
1171 .xlevel = 0x80000008,
1172 .model_id = "Intel Core Processor (Haswell)",
1175 .name = "Broadwell-noTSX",
1176 .level = 0xd,
1177 .vendor = CPUID_VENDOR_INTEL,
1178 .family = 6,
1179 .model = 61,
1180 .stepping = 2,
1181 .features[FEAT_1_EDX] =
1182 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1183 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1184 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1185 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1186 CPUID_DE | CPUID_FP87,
1187 .features[FEAT_1_ECX] =
1188 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1189 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1190 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1191 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1192 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1193 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1194 .features[FEAT_8000_0001_EDX] =
1195 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1196 CPUID_EXT2_SYSCALL,
1197 .features[FEAT_8000_0001_ECX] =
1198 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1199 .features[FEAT_7_0_EBX] =
1200 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1201 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1202 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1203 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1204 CPUID_7_0_EBX_SMAP,
1205 .features[FEAT_XSAVE] =
1206 CPUID_XSAVE_XSAVEOPT,
1207 .features[FEAT_6_EAX] =
1208 CPUID_6_EAX_ARAT,
1209 .xlevel = 0x80000008,
1210 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1213 .name = "Broadwell",
1214 .level = 0xd,
1215 .vendor = CPUID_VENDOR_INTEL,
1216 .family = 6,
1217 .model = 61,
1218 .stepping = 2,
1219 .features[FEAT_1_EDX] =
1220 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1221 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1222 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1223 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1224 CPUID_DE | CPUID_FP87,
1225 .features[FEAT_1_ECX] =
1226 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1227 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1228 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1229 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1230 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1231 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1232 .features[FEAT_8000_0001_EDX] =
1233 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1234 CPUID_EXT2_SYSCALL,
1235 .features[FEAT_8000_0001_ECX] =
1236 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1237 .features[FEAT_7_0_EBX] =
1238 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1239 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1240 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1241 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1242 CPUID_7_0_EBX_SMAP,
1243 .features[FEAT_XSAVE] =
1244 CPUID_XSAVE_XSAVEOPT,
1245 .features[FEAT_6_EAX] =
1246 CPUID_6_EAX_ARAT,
1247 .xlevel = 0x80000008,
1248 .model_id = "Intel Core Processor (Broadwell)",
1251 .name = "Skylake-Client",
1252 .level = 0xd,
1253 .vendor = CPUID_VENDOR_INTEL,
1254 .family = 6,
1255 .model = 94,
1256 .stepping = 3,
1257 .features[FEAT_1_EDX] =
1258 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1259 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1260 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1261 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1262 CPUID_DE | CPUID_FP87,
1263 .features[FEAT_1_ECX] =
1264 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1265 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1266 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1267 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1268 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1269 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1270 .features[FEAT_8000_0001_EDX] =
1271 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1272 CPUID_EXT2_SYSCALL,
1273 .features[FEAT_8000_0001_ECX] =
1274 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1275 .features[FEAT_7_0_EBX] =
1276 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1277 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1278 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1279 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1280 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1281 /* Missing: XSAVES (not supported by some Linux versions,
1282 * including v4.1 to v4.6).
1283 * KVM doesn't yet expose any XSAVES state save component,
1284 * and the only one defined in Skylake (processor tracing)
1285 * probably will block migration anyway.
1287 .features[FEAT_XSAVE] =
1288 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1289 CPUID_XSAVE_XGETBV1,
1290 .features[FEAT_6_EAX] =
1291 CPUID_6_EAX_ARAT,
1292 .xlevel = 0x80000008,
1293 .model_id = "Intel Core Processor (Skylake)",
1296 .name = "Opteron_G1",
1297 .level = 5,
1298 .vendor = CPUID_VENDOR_AMD,
1299 .family = 15,
1300 .model = 6,
1301 .stepping = 1,
1302 .features[FEAT_1_EDX] =
1303 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1304 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1305 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1306 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1307 CPUID_DE | CPUID_FP87,
1308 .features[FEAT_1_ECX] =
1309 CPUID_EXT_SSE3,
1310 .features[FEAT_8000_0001_EDX] =
1311 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1312 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1313 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1314 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1315 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1316 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1317 .xlevel = 0x80000008,
1318 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1321 .name = "Opteron_G2",
1322 .level = 5,
1323 .vendor = CPUID_VENDOR_AMD,
1324 .family = 15,
1325 .model = 6,
1326 .stepping = 1,
1327 .features[FEAT_1_EDX] =
1328 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1329 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1330 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1331 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1332 CPUID_DE | CPUID_FP87,
1333 .features[FEAT_1_ECX] =
1334 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1335 /* Missing: CPUID_EXT2_RDTSCP */
1336 .features[FEAT_8000_0001_EDX] =
1337 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1338 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1339 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1340 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1341 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1342 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1343 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1344 .features[FEAT_8000_0001_ECX] =
1345 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1346 .xlevel = 0x80000008,
1347 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1350 .name = "Opteron_G3",
1351 .level = 5,
1352 .vendor = CPUID_VENDOR_AMD,
1353 .family = 15,
1354 .model = 6,
1355 .stepping = 1,
1356 .features[FEAT_1_EDX] =
1357 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1358 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1359 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1360 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1361 CPUID_DE | CPUID_FP87,
1362 .features[FEAT_1_ECX] =
1363 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1364 CPUID_EXT_SSE3,
1365 /* Missing: CPUID_EXT2_RDTSCP */
1366 .features[FEAT_8000_0001_EDX] =
1367 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1368 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1369 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1370 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1371 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1372 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1373 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1374 .features[FEAT_8000_0001_ECX] =
1375 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1376 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1377 .xlevel = 0x80000008,
1378 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1381 .name = "Opteron_G4",
1382 .level = 0xd,
1383 .vendor = CPUID_VENDOR_AMD,
1384 .family = 21,
1385 .model = 1,
1386 .stepping = 2,
1387 .features[FEAT_1_EDX] =
1388 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1389 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1390 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1391 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1392 CPUID_DE | CPUID_FP87,
1393 .features[FEAT_1_ECX] =
1394 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1395 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1396 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1397 CPUID_EXT_SSE3,
1398 /* Missing: CPUID_EXT2_RDTSCP */
1399 .features[FEAT_8000_0001_EDX] =
1400 CPUID_EXT2_LM |
1401 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1402 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1403 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1404 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1405 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1406 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1407 .features[FEAT_8000_0001_ECX] =
1408 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1409 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1410 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1411 CPUID_EXT3_LAHF_LM,
1412 /* no xsaveopt! */
1413 .xlevel = 0x8000001A,
1414 .model_id = "AMD Opteron 62xx class CPU",
1417 .name = "Opteron_G5",
1418 .level = 0xd,
1419 .vendor = CPUID_VENDOR_AMD,
1420 .family = 21,
1421 .model = 2,
1422 .stepping = 0,
1423 .features[FEAT_1_EDX] =
1424 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1425 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1426 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1427 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1428 CPUID_DE | CPUID_FP87,
1429 .features[FEAT_1_ECX] =
1430 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1431 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1432 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1433 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1434 /* Missing: CPUID_EXT2_RDTSCP */
1435 .features[FEAT_8000_0001_EDX] =
1436 CPUID_EXT2_LM |
1437 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1438 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1439 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1440 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1441 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1442 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1443 .features[FEAT_8000_0001_ECX] =
1444 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1445 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1446 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1447 CPUID_EXT3_LAHF_LM,
1448 /* no xsaveopt! */
1449 .xlevel = 0x8000001A,
1450 .model_id = "AMD Opteron 63xx class CPU",
/* Generic (property name, value) pair; used for tables of default
 * property settings such as kvm_default_props below. */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    /* Features forced off by default under KVM: */
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    /* End-of-table sentinel; x86_cpu_change_kvm_default() relies on it. */
    { NULL, NULL },
1475 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1477 PropValue *pv;
1478 for (pv = kvm_default_props; pv->prop; pv++) {
1479 if (!strcmp(pv->prop, prop)) {
1480 pv->value = value;
1481 break;
1485 /* It is valid to call this function only for properties that
1486 * are already present in the kvm_default_props table.
1488 assert(pv->prop);
1491 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1492 bool migratable_only);
1494 #ifdef CONFIG_KVM
/* Fill @str (at least 48 bytes) with the host CPU model name string,
 * read from CPUID leaves 0x80000002..0x80000004 (16 bytes per leaf).
 * Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        uint32_t regs[4] = { 0, 0, 0, 0 };

        /* Registers are laid out in memory in EAX/EBX/ECX/EDX order,
         * which is exactly the byte order of the model string. */
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
1511 static X86CPUDefinition host_cpudef;
static Property host_x86_cpu_properties[] = {
    /* When true, only migration-safe features are exposed; see
     * host_x86_cpu_initfn(), which defers feature filling until this
     * property's value is known. */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    /* Presumably passes host cache details through to guest CPUID --
     * TODO(review): confirm against the CPUID build code. */
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
1519 /* class_init for the "host" CPU model
1521 * This function may be called before KVM is initialized.
1523 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1525 DeviceClass *dc = DEVICE_CLASS(oc);
1526 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1527 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1529 xcc->kvm_required = true;
1531 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1532 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1534 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1535 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1536 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1537 host_cpudef.stepping = eax & 0x0F;
1539 cpu_x86_fill_model_id(host_cpudef.model_id);
1541 xcc->cpu_def = &host_cpudef;
1543 /* level, xlevel, xlevel2, and the feature words are initialized on
1544 * instance_init, because they require KVM to be initialized.
1547 dc->props = host_x86_cpu_properties;
1548 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1549 dc->cannot_destroy_with_object_finalize_yet = true;
1552 static void host_x86_cpu_initfn(Object *obj)
1554 X86CPU *cpu = X86_CPU(obj);
1555 CPUX86State *env = &cpu->env;
1556 KVMState *s = kvm_state;
1558 /* We can't fill the features array here because we don't know yet if
1559 * "migratable" is true or false.
1561 cpu->host_features = true;
1563 /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
1564 if (kvm_enabled()) {
1565 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1566 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1567 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1570 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
/* QOM type registration for the KVM-only "host" CPU model. */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
1580 #endif
1582 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1584 FeatureWordInfo *f = &feature_word_info[w];
1585 int i;
1587 for (i = 0; i < 32; ++i) {
1588 if ((1UL << i) & mask) {
1589 const char *reg = get_register_name_32(f->cpuid_reg);
1590 assert(reg);
1591 fprintf(stderr, "warning: %s doesn't support requested feature: "
1592 "CPUID.%02XH:%s%s%s [bit %d]\n",
1593 kvm_enabled() ? "host" : "TCG",
1594 f->cpuid_eax, reg,
1595 f->feat_names[i] ? "." : "",
1596 f->feat_names[i] ? f->feat_names[i] : "", i);
1601 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1602 const char *name, void *opaque,
1603 Error **errp)
1605 X86CPU *cpu = X86_CPU(obj);
1606 CPUX86State *env = &cpu->env;
1607 int64_t value;
1609 value = (env->cpuid_version >> 8) & 0xf;
1610 if (value == 0xf) {
1611 value += (env->cpuid_version >> 20) & 0xff;
1613 visit_type_int(v, name, &value, errp);
1616 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1617 const char *name, void *opaque,
1618 Error **errp)
1620 X86CPU *cpu = X86_CPU(obj);
1621 CPUX86State *env = &cpu->env;
1622 const int64_t min = 0;
1623 const int64_t max = 0xff + 0xf;
1624 Error *local_err = NULL;
1625 int64_t value;
1627 visit_type_int(v, name, &value, &local_err);
1628 if (local_err) {
1629 error_propagate(errp, local_err);
1630 return;
1632 if (value < min || value > max) {
1633 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1634 name ? name : "null", value, min, max);
1635 return;
1638 env->cpuid_version &= ~0xff00f00;
1639 if (value > 0x0f) {
1640 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1641 } else {
1642 env->cpuid_version |= value << 8;
1646 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1647 const char *name, void *opaque,
1648 Error **errp)
1650 X86CPU *cpu = X86_CPU(obj);
1651 CPUX86State *env = &cpu->env;
1652 int64_t value;
1654 value = (env->cpuid_version >> 4) & 0xf;
1655 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1656 visit_type_int(v, name, &value, errp);
1659 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1660 const char *name, void *opaque,
1661 Error **errp)
1663 X86CPU *cpu = X86_CPU(obj);
1664 CPUX86State *env = &cpu->env;
1665 const int64_t min = 0;
1666 const int64_t max = 0xff;
1667 Error *local_err = NULL;
1668 int64_t value;
1670 visit_type_int(v, name, &value, &local_err);
1671 if (local_err) {
1672 error_propagate(errp, local_err);
1673 return;
1675 if (value < min || value > max) {
1676 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1677 name ? name : "null", value, min, max);
1678 return;
1681 env->cpuid_version &= ~0xf00f0;
1682 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1685 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1686 const char *name, void *opaque,
1687 Error **errp)
1689 X86CPU *cpu = X86_CPU(obj);
1690 CPUX86State *env = &cpu->env;
1691 int64_t value;
1693 value = env->cpuid_version & 0xf;
1694 visit_type_int(v, name, &value, errp);
1697 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1698 const char *name, void *opaque,
1699 Error **errp)
1701 X86CPU *cpu = X86_CPU(obj);
1702 CPUX86State *env = &cpu->env;
1703 const int64_t min = 0;
1704 const int64_t max = 0xf;
1705 Error *local_err = NULL;
1706 int64_t value;
1708 visit_type_int(v, name, &value, &local_err);
1709 if (local_err) {
1710 error_propagate(errp, local_err);
1711 return;
1713 if (value < min || value > max) {
1714 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1715 name ? name : "null", value, min, max);
1716 return;
1719 env->cpuid_version &= ~0xf;
1720 env->cpuid_version |= value & 0xf;
1723 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1725 X86CPU *cpu = X86_CPU(obj);
1726 CPUX86State *env = &cpu->env;
1727 char *value;
1729 value = g_malloc(CPUID_VENDOR_SZ + 1);
1730 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1731 env->cpuid_vendor3);
1732 return value;
1735 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1736 Error **errp)
1738 X86CPU *cpu = X86_CPU(obj);
1739 CPUX86State *env = &cpu->env;
1740 int i;
1742 if (strlen(value) != CPUID_VENDOR_SZ) {
1743 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1744 return;
1747 env->cpuid_vendor1 = 0;
1748 env->cpuid_vendor2 = 0;
1749 env->cpuid_vendor3 = 0;
1750 for (i = 0; i < 4; i++) {
1751 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1752 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1753 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1757 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1759 X86CPU *cpu = X86_CPU(obj);
1760 CPUX86State *env = &cpu->env;
1761 char *value;
1762 int i;
1764 value = g_malloc(48 + 1);
1765 for (i = 0; i < 48; i++) {
1766 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1768 value[48] = '\0';
1769 return value;
1772 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1773 Error **errp)
1775 X86CPU *cpu = X86_CPU(obj);
1776 CPUX86State *env = &cpu->env;
1777 int c, len, i;
1779 if (model_id == NULL) {
1780 model_id = "";
1782 len = strlen(model_id);
1783 memset(env->cpuid_model, 0, 48);
1784 for (i = 0; i < 48; i++) {
1785 if (i >= len) {
1786 c = '\0';
1787 } else {
1788 c = (uint8_t)model_id[i];
1790 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1794 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1795 void *opaque, Error **errp)
1797 X86CPU *cpu = X86_CPU(obj);
1798 int64_t value;
1800 value = cpu->env.tsc_khz * 1000;
1801 visit_type_int(v, name, &value, errp);
1804 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1805 void *opaque, Error **errp)
1807 X86CPU *cpu = X86_CPU(obj);
1808 const int64_t min = 0;
1809 const int64_t max = INT64_MAX;
1810 Error *local_err = NULL;
1811 int64_t value;
1813 visit_type_int(v, name, &value, &local_err);
1814 if (local_err) {
1815 error_propagate(errp, local_err);
1816 return;
1818 if (value < min || value > max) {
1819 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1820 name ? name : "null", value, min, max);
1821 return;
1824 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1827 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1828 void *opaque, Error **errp)
1830 X86CPU *cpu = X86_CPU(obj);
1831 int64_t value = cpu->apic_id;
1833 visit_type_int(v, name, &value, errp);
1836 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1837 void *opaque, Error **errp)
1839 X86CPU *cpu = X86_CPU(obj);
1840 DeviceState *dev = DEVICE(obj);
1841 const int64_t min = 0;
1842 const int64_t max = UINT32_MAX;
1843 Error *error = NULL;
1844 int64_t value;
1846 if (dev->realized) {
1847 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1848 "it was realized", name, object_get_typename(obj));
1849 return;
1852 visit_type_int(v, name, &value, &error);
1853 if (error) {
1854 error_propagate(errp, error);
1855 return;
1857 if (value < min || value > max) {
1858 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1859 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1860 object_get_typename(obj), name, value, min, max);
1861 return;
1864 if ((value != cpu->apic_id) && cpu_exists(value)) {
1865 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1866 return;
1868 cpu->apic_id = value;
1871 /* Generic getter for "feature-words" and "filtered-features" properties */
1872 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1873 const char *name, void *opaque,
1874 Error **errp)
1876 uint32_t *array = (uint32_t *)opaque;
1877 FeatureWord w;
1878 Error *err = NULL;
1879 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1880 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1881 X86CPUFeatureWordInfoList *list = NULL;
1883 for (w = 0; w < FEATURE_WORDS; w++) {
1884 FeatureWordInfo *wi = &feature_word_info[w];
1885 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1886 qwi->cpuid_input_eax = wi->cpuid_eax;
1887 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1888 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1889 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1890 qwi->features = array[w];
1892 /* List will be in reverse order, but order shouldn't matter */
1893 list_entries[w].next = list;
1894 list_entries[w].value = &word_infos[w];
1895 list = &list_entries[w];
1898 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
1899 error_propagate(errp, err);
1902 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1903 void *opaque, Error **errp)
1905 X86CPU *cpu = X86_CPU(obj);
1906 int64_t value = cpu->hyperv_spinlock_attempts;
1908 visit_type_int(v, name, &value, errp);
1911 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1912 void *opaque, Error **errp)
1914 const int64_t min = 0xFFF;
1915 const int64_t max = UINT_MAX;
1916 X86CPU *cpu = X86_CPU(obj);
1917 Error *err = NULL;
1918 int64_t value;
1920 visit_type_int(v, name, &value, &err);
1921 if (err) {
1922 error_propagate(errp, err);
1923 return;
1926 if (value < min || value > max) {
1927 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1928 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1929 object_get_typename(obj), name ? name : "null",
1930 value, min, max);
1931 return;
1933 cpu->hyperv_spinlock_attempts = value;
/* qdev property type for "hv-spinlocks"; the custom accessors enforce
 * the valid value range (see x86_get_hv_spinlocks/x86_set_hv_spinlocks). */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
/* Convert every '_' in a feature-string option name to '-', in place, so
 * the name conforms to the QOM property naming rule ('-' instead of '_').
 */
static inline void feat2prop(char *s)
{
    for (char *p = s; *p; p++) {
        if (*p == '_') {
            *p = '-';
        }
    }
}
/* Compatibility hack to maintain the legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static FeatureWordArray plus_features = { 0 };   /* features forced on via "+feat" */
static FeatureWordArray minus_features = { 0 };  /* features forced off via "-feat" */
1960 /* Parse "+feature,-feature,feature=foo" CPU feature string
1962 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1963 Error **errp)
1965 X86CPU *cpu = X86_CPU(cs);
1966 char *featurestr; /* Single 'key=value" string being parsed */
1967 Error *local_err = NULL;
1969 featurestr = features ? strtok(features, ",") : NULL;
1971 while (featurestr) {
1972 char *val;
1973 if (featurestr[0] == '+') {
1974 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1975 } else if (featurestr[0] == '-') {
1976 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1977 } else if ((val = strchr(featurestr, '='))) {
1978 *val = 0; val++;
1979 feat2prop(featurestr);
1980 if (!strcmp(featurestr, "tsc-freq")) {
1981 int64_t tsc_freq;
1982 char *err;
1983 char num[32];
1985 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1986 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1987 if (tsc_freq < 0 || *err) {
1988 error_setg(errp, "bad numerical value %s", val);
1989 return;
1991 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1992 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1993 &local_err);
1994 } else {
1995 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1997 } else {
1998 feat2prop(featurestr);
1999 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
2001 if (local_err) {
2002 error_propagate(errp, local_err);
2003 return;
2005 featurestr = strtok(NULL, ",");
2009 /* Print all cpuid feature names in featureset
2011 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2013 int bit;
2014 bool first = true;
2016 for (bit = 0; bit < 32; bit++) {
2017 if (featureset[bit]) {
2018 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2019 first = false;
/* generate CPU information: list every built-in CPU model (plus "host"
 * when built with KVM support) and all recognized CPUID flag names. */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    X86CPUDefinition *def;
    char buf[256];
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        def = &builtin_x86_defs[i];
        snprintf(buf, sizeof(buf), "%s", def->name);
        (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
    }
#ifdef CONFIG_KVM
    /* "-cpu host" is not in builtin_x86_defs; list it separately */
    (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
                   "KVM processor with all supported host features "
                   "(only available in KVM mode)");
#endif

    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];

        (*cpu_fprintf)(f, "  ");
        listflags(f, cpu_fprintf, fw->feat_names);
        (*cpu_fprintf)(f, "\n");
    }
}
/* QMP query-cpu-definitions: build a list with one entry per built-in
 * CPU model name. The list is in reverse table order (entries are
 * prepended). Caller owns the returned list. @errp is unused here. */
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    X86CPUDefinition *def;
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        CpuDefinitionInfoList *entry;
        CpuDefinitionInfo *info;

        def = &builtin_x86_defs[i];
        info = g_malloc0(sizeof(*info));
        info->name = g_strdup(def->name);

        /* Prepend to the result list */
        entry = g_malloc0(sizeof(*entry));
        entry->value = info;
        entry->next = cpu_list;
        cpu_list = entry;
    }

    return cpu_list;
}
/* Return the feature bits of word @w that the current accelerator can
 * support: the KVM-reported CPUID bits under KVM, the static tcg_features
 * mask under TCG, and all-ones for any other accelerator (no filtering).
 * When @migratable_only is set, bits that would block migration are
 * additionally masked out. */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                                    wi->cpuid_ecx,
                                                    wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
/*
 * Filters CPU feature words based on host availability of each feature.
 *
 * Masks env->features[] down to what the accelerator supports and records
 * the removed bits in cpu->filtered_features[]. Unavailable features are
 * reported when -cpu check/enforce was requested.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, cpu->migratable);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            if (cpu->check_cpuid || cpu->enforce_cpuid) {
                report_unavailable_features(w, cpu->filtered_features[w]);
            }
            rv = 1;
        }
    }

    return rv;
}
2124 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2126 PropValue *pv;
2127 for (pv = props; pv->prop; pv++) {
2128 if (!pv->value) {
2129 continue;
2131 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2132 &error_abort);
/* Load data from X86CPUDefinition into an X86CPU instance: copies the
 * CPUID levels, version fields, model-id and feature words, then applies
 * the accelerator-specific adjustments (KVM defaults, hypervisor bit,
 * host vendor string under KVM). Errors from property setters go to
 * @errp. */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    object_property_set_int(OBJECT(cpu), def->level, "level", errp);
    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        /* x2apic needs the in-kernel irqchip */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    }

    /* Always advertise that we run under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
}
/* Create a new, unrealized X86CPU from a "model[,features...]" string.
 * Returns NULL and sets @errp on an unknown model or a bad feature
 * string; on failure any partially created object is unreferenced. */
X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
{
    X86CPU *cpu = NULL;
    ObjectClass *oc;
    gchar **model_pieces;
    char *name, *features;
    Error *error = NULL;

    /* Split "model,feat1,feat2,..." into at most two pieces */
    model_pieces = g_strsplit(cpu_model, ",", 2);
    if (!model_pieces[0]) {
        error_setg(&error, "Invalid/empty CPU model name");
        goto out;
    }
    name = model_pieces[0];
    features = model_pieces[1];

    oc = x86_cpu_class_by_name(name);
    if (oc == NULL) {
        error_setg(&error, "Unable to find CPU definition: %s", name);
        goto out;
    }

    cpu = X86_CPU(object_new(object_class_get_name(oc)));

    x86_cpu_parse_featurestr(CPU(cpu), features, &error);
    if (error) {
        goto out;
    }

out:
    if (error != NULL) {
        error_propagate(errp, error);
        if (cpu) {
            object_unref(OBJECT(cpu));
            cpu = NULL;
        }
    }
    g_strfreev(model_pieces);
    return cpu;
}
/* Convenience wrapper: create an X86CPU from @cpu_model and realize it.
 * Errors are reported to stderr (not propagated); returns NULL on
 * failure. */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    Error *error = NULL;
    X86CPU *cpu;

    cpu = cpu_x86_create(cpu_model, &error);
    if (error) {
        goto out;
    }

    object_property_set_bool(OBJECT(cpu), true, "realized", &error);

out:
    if (error) {
        error_report_err(error);
        if (cpu != NULL) {
            object_unref(OBJECT(cpu));
            cpu = NULL;
        }
    }
    return cpu;
}
2250 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2252 X86CPUDefinition *cpudef = data;
2253 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2255 xcc->cpu_def = cpudef;
/* Register a QOM type for one built-in CPU model. The type name is
 * derived from the model name and the definition is attached via
 * class_data (consumed by x86_cpu_cpudef_class_init). */
static void x86_register_cpudef_type(X86CPUDefinition *def)
{
    char *typename = x86_cpu_type_name(def->name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = def,
    };

    type_register(&ti);
    g_free(typename);
}
2272 #if !defined(CONFIG_USER_ONLY)
/* Remove the APIC bit from CPUID[1].EDX (used when the board has no
 * APIC). */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2279 #endif /* !CONFIG_USER_ONLY */
/* Compute the guest-visible CPUID result for leaf @index / subleaf
 * @count, writing the four output registers through eax..edx.
 * Out-of-range leaves are clamped to the highest supported leaf of the
 * matching range (basic, extended 0x8000xxxx, or Centaur 0xC000xxxx)
 * before dispatch. */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and maximum basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, feature flags, APIC id / logical CPU count */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors CR4.OSXSAVE at read time */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        *ecx = 0;
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* Host core/thread counts in EAX[31:26] don't apply here */
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE at read time */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(smp_cores, smp_threads);
            *ebx = smp_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(smp_cores, smp_threads);
            *ebx = smp_cores * smp_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        KVMState *s = cs->kvm_state;
        uint64_t ena_mask;
        int i;

        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }
        /* ena_mask: XSAVE components the accelerator can enable */
        if (kvm_enabled()) {
            ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
            ena_mask <<= 32;
            ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
        } else {
            ena_mask = -1;
        }

        if (count == 0) {
            *ecx = 0x240;
            for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
                const ExtSaveArea *esa = &x86_ext_save_areas[i];
                if ((env->features[esa->feature] & esa->bits) == esa->bits
                    && ((ena_mask >> i) & 1) != 0) {
                    if (i < 32) {
                        *eax |= 1u << i;
                    } else {
                        *edx |= 1u << (i - 32);
                    }
                    /* ECX reports the largest save-area extent needed */
                    *ecx = MAX(*ecx, esa->offset + esa->size);
                }
            }
            *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            const ExtSaveArea *esa = &x86_ext_save_areas[count];
            if ((env->features[esa->feature] & esa->bits) == esa->bits
                && ((ena_mask >> count) & 1) != 0) {
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        *edx = ((L3_SIZE_KB/512) << 18) | \
               (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
               (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        break;
    case 0x80000007:
        /* Advanced Power Management */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
                *eax = 0x00000024; /* 36 bits physical */
            } else {
                *eax = 0x00000020; /* 32 bits physical */
            }
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        /* Centaur range: maximum supported leaf */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* CPUClass::reset(): bring the vCPU to architectural power-on/RESET
 * state. Zeroes the non-persistent part of CPUX86State (everything up
 * to cpuid_level), then re-establishes the x86 reset values for
 * segments, control registers, FPU/SSE state, MSRs and MTRRs. */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* CPUID/feature fields live past cpuid_level and must survive reset */
    memset(env, 0, offsetof(CPUX86State, cpuid_level));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode reset segment state; CS base 0xffff0000 so that
     * CS:IP = f000:fff0 points at the reset vector */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((env->features[esa->feature] & esa->bits) == esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2800 #ifndef CONFIG_USER_ONLY
/* True when this vCPU is the bootstrap processor, as indicated by the
 * BSP flag in its APIC base MSR. */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
/* TODO: remove me, when reset over QOM tree is implemented
 * (machine-reset hook that simply forwards to cpu_reset). */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
2812 #endif
/* Initialize Machine Check state, but only when the guest CPUID model
 * is family >= 6 and advertises both MCE and MCA in CPUID[1].EDX;
 * otherwise the MCE MSR state is left zeroed. */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            /* Enable all error reporting in each bank's MCi_CTL */
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
2830 #ifndef CONFIG_USER_ONLY
/* Create the per-CPU APIC device (kvm-apic, xen-apic or emulated apic,
 * depending on the accelerator) and attach it as a QOM child of the
 * CPU. The device is created but not realized here. @errp is currently
 * unused by this function. */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = DEVICE(object_new(apic_type));

    object_property_add_child(OBJECT(cpu), "apic",
                              OBJECT(cpu->apic_state), NULL);
    qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
/* Realize the previously created APIC device (no-op when the CPU has
 * none) and, on the first call only, map the APIC MMIO region into
 * system memory — all APICs share that one mapping. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
     }
}
/* machine-init-done notifier: if the machine exposes /machine/smram,
 * alias it into this CPU's address space (disabled by default) above
 * normal memory, so SMM code can toggle SMRAM visibility. */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
2890 #else
/* No APIC device in user-mode emulation: empty stub. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
2894 #endif
/* Vendor checks against the guest-visible CPUID vendor string words */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for X86CPU: validate the configuration
 * (KVM-required models, apic-id), resolve the final feature-word set
 * (host features, +/- legacy flags, accelerator filtering, AMD
 * aliases), then create the execution state: exec init, APIC, MCE,
 * per-CPU address space (TCG), vcpu thread, and finally reset and the
 * parent realize. Errors are propagated through @errp. */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;
    FeatureWord w;

    /* Models like "host" only make sense under KVM; reject early */
    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id < 0) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /*TODO: cpu->host_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->host_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Legacy +feat/-feat always win over feat=on|off (see plus_features) */
    for (w = 0; w < FEATURE_WORDS; w++) {
        cpu->env.features[w] |= plus_features[w];
        cpu->env.features[w] &= ~minus_features[w];
    }

    /* CPUID[7] features require the guest to see at least leaf 7 */
    if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
        env->cpuid_level = 7;
    }

    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    cpu_exec_init(cs, &error_abort);

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram.  */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
/* Opaque for the per-feature-bit QOM properties: @ptr points into a
 * feature word, @mask holds the bit(s) the property controls. */
typedef struct BitProperty {
    uint32_t *ptr;
    uint32_t mask;
} BitProperty;
3047 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3048 void *opaque, Error **errp)
3050 BitProperty *fp = opaque;
3051 bool value = (*fp->ptr & fp->mask) == fp->mask;
3052 visit_type_bool(v, name, &value, errp);
/* QOM setter for a feature-bit property: sets or clears every bit in
 * the property's mask. Rejected once the device is realized. */
static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        *fp->ptr |= fp->mask;
    } else {
        *fp->ptr &= ~fp->mask;
    }
}
3081 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3082 void *opaque)
3084 BitProperty *prop = opaque;
3085 g_free(prop);
/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 *
 * NOTE(review): assumes bitnr < 32; `1UL << bitnr` would be UB for larger
 * values — confirm callers (x86_cpu_register_feature_bit_props iterates
 * 0..31, which is fine).
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      uint32_t *field,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        /* Name already registered: extend the existing property's mask */
        fp = op->opaque;
        assert(fp->ptr == field);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->ptr = field;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}
/* Register the QOM property (or properties) for one bit of feature word
 * @w. Feature-name table entries may contain several '|'-separated
 * aliases: the first becomes the real bit property, the rest become
 * property aliases of it. Unnamed bits get no property. */
static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    Object *obj = OBJECT(cpu);
    int i;
    char **names;
    FeatureWordInfo *fi = &feature_word_info[w];

    if (!fi->feat_names) {
        return;
    }
    if (!fi->feat_names[bitnr]) {
        return;
    }

    names = g_strsplit(fi->feat_names[bitnr], "|", 0);

    /* Property names use '-', the tables may use '_' */
    feat2prop(names[0]);
    x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);

    for (i = 1; names[i]; i++) {
        feat2prop(names[i]);
        object_property_add_alias(obj, names[i], obj, names[0],
                                  &error_abort);
    }

    g_strfreev(names);
}
/* QOM instance_init for TYPE_X86_CPU: registers the static properties
 * (family/model/stepping/vendor/...), all per-feature-bit properties,
 * and loads the class's X86CPUDefinition as the instance defaults.
 * No accelerator state is touched here; that happens at realize. */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "apic-id", "int",
                        x86_cpuid_get_apic_id,
                        x86_cpuid_set_apic_id, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

#ifndef CONFIG_USER_ONLY
    /* Any code creating new X86CPU objects have to set apic-id explicitly */
    cpu->apic_id = -1;
#endif

    /* One bool property per named bit of every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3205 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3207 X86CPU *cpu = X86_CPU(cs);
3209 return cpu->apic_id;
3212 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3214 X86CPU *cpu = X86_CPU(cs);
3216 return cpu->env.cr[0] & CR0_PG_MASK;
3219 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3221 X86CPU *cpu = X86_CPU(cs);
3223 cpu->env.eip = value;
3226 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3228 X86CPU *cpu = X86_CPU(cs);
3230 cpu->env.eip = tb->pc - tb->cs_base;
3233 static bool x86_cpu_has_work(CPUState *cs)
3235 X86CPU *cpu = X86_CPU(cs);
3236 CPUX86State *env = &cpu->env;
3238 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3239 CPU_INTERRUPT_POLL)) &&
3240 (env->eflags & IF_MASK)) ||
3241 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3242 CPU_INTERRUPT_INIT |
3243 CPU_INTERRUPT_SIPI |
3244 CPU_INTERRUPT_MCE)) ||
3245 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3246 !(env->hflags & HF_SMM_MASK));
/* qdev properties shared by every X86CPU subclass (settable via
 * -cpu model,prop=value on the command line).
 */
static Property x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments, exposed to Windows guests under KVM */
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* Warn ("check", on by default) or refuse to start ("enforce")
     * when the host cannot provide every requested feature.
     */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    /* Expose the KVM paravirt CPUID leaves to the guest */
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    /* Overrides for the maximum basic/extended CPUID leaves */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    /* Expose CPUID leaf 0xB (extended topology) to the guest */
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_END_OF_LIST()
};
/*
 * Class init for the abstract TYPE_X86_CPU base class: wires the
 * generic CPUClass/DeviceClass hooks to their x86 implementations and
 * attaches the common property list.  Model-specific subclasses only
 * add their CPU definition on top of this.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain up: remember the parent hooks so our overrides can call them */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System-emulation only: memory inspection, crash dumps, migration */
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    /* General-purpose + FP register count exposed to gdb */
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    /*
     * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
     * object in cpus -> dangling pointer after final object_unref().
     */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
/* Abstract base type for every x86 CPU model; concrete models are
 * registered as subclasses by x86_register_cpudef_type().
 */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3333 static void x86_cpu_register_types(void)
3335 int i;
3337 type_register_static(&x86_cpu_type_info);
3338 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3339 x86_register_cpudef_type(&builtin_x86_defs[i]);
3341 #ifdef CONFIG_KVM
3342 type_register_static(&host_x86_cpu_type_info);
3343 #endif
3346 type_init(x86_cpu_register_types)