qapi-dealloc: Reduce use outside of generated code
[qemu/ar7.git] / target-i386 / cpu.c
blob0f38d1eae317ac4cc155d7623fa1712ab6adc77d
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "sysemu/kvm.h"
23 #include "sysemu/cpus.h"
24 #include "kvm_i386.h"
26 #include "qemu/error-report.h"
27 #include "qemu/option.h"
28 #include "qemu/config-file.h"
29 #include "qapi/qmp/qerror.h"
31 #include "qapi-types.h"
32 #include "qapi-visit.h"
33 #include "qapi/visitor.h"
34 #include "sysemu/arch_init.h"
36 #include "hw/hw.h"
37 #if defined(CONFIG_KVM)
38 #include <linux/kvm_para.h>
39 #endif
41 #include "sysemu/sysemu.h"
42 #include "hw/qdev-properties.h"
43 #ifndef CONFIG_USER_ONLY
44 #include "exec/address-spaces.h"
45 #include "hw/xen/xen.h"
46 #include "hw/i386/apic_internal.h"
47 #endif
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d


/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

#define CPUID_4_LEVEL(l)          ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a power-of-two way count to the 4-bit field value; any
 * unsupported way count encodes as 0 (invalid).
 * Note: arguments are parenthesized for macro hygiene.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a) : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)


/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* No L3 cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
161 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
162 uint32_t vendor2, uint32_t vendor3)
164 int i;
165 for (i = 0; i < 4; i++) {
166 dst[i] = vendor1 >> (8 * i);
167 dst[i + 4] = vendor2 >> (8 * i);
168 dst[i + 8] = vendor3 >> (8 * i);
170 dst[CPUID_VENDOR_SZ] = '\0';
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * between feature naming conventions, aliases may be added.
 *
 * Each table has exactly 32 entries, one per bit of the corresponding
 * CPUID register; NULL marks bits QEMU has no name for (unsupported).
 * "a|b" entries accept either spelling on the command line.
 */

/* CPUID[1].EDX feature names */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};

/* CPUID[1].ECX feature names */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};

/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};

/* CPUID[8000_0001].ECX feature names */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[C000_0001].EDX feature names (VIA/Centaur) */
static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* KVM paravirt feature names (CPUID[KVM_CPUID_FEATURES].EAX).
 * "kvmclock" appears twice on purpose: bits 0 and 3 are the old and
 * new clocksource MSR interfaces, both enabled by the same flag.
 */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* SVM feature names (CPUID[8000_000A].EDX) */
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[EAX=7,ECX=0].EBX feature names */
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
    "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
    "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};

/* CPUID[EAX=7,ECX=0].ECX feature names */
static const char *cpuid_7_0_ecx_feature_name[] = {
    NULL, NULL, NULL, "pku",
    "ospke", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[8000_0007].EDX (APM) feature names */
static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[EAX=0xd,ECX=1].EAX (XSAVE) feature names */
static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[6].EAX feature names */
static const char *cpuid_6_feature_name[] = {
    NULL, NULL, "arat", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Built-in CPU model feature sets, and the subsets of each feature word
 * that the TCG emulator actually implements.  All CPUID_* bit constants
 * come from cpu.h.
 */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Describes one CPUID feature word: where to read it from the CPUID
 * instruction, which names QEMU knows for its bits, and which bits are
 * emulated by TCG / must not be migrated.
 */
typedef struct FeatureWordInfo {
    const char **feat_names;     /* 32-entry bit-name table (may hold NULLs) */
    uint32_t cpuid_eax;          /* Input EAX for CPUID */
    bool cpuid_needs_ecx;        /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;          /* Input ECX value for CPUID */
    int cpuid_reg;               /* output register (R_* constant) */
    uint32_t tcg_features;       /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
380 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
381 [FEAT_1_EDX] = {
382 .feat_names = feature_name,
383 .cpuid_eax = 1, .cpuid_reg = R_EDX,
384 .tcg_features = TCG_FEATURES,
386 [FEAT_1_ECX] = {
387 .feat_names = ext_feature_name,
388 .cpuid_eax = 1, .cpuid_reg = R_ECX,
389 .tcg_features = TCG_EXT_FEATURES,
391 [FEAT_8000_0001_EDX] = {
392 .feat_names = ext2_feature_name,
393 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
394 .tcg_features = TCG_EXT2_FEATURES,
396 [FEAT_8000_0001_ECX] = {
397 .feat_names = ext3_feature_name,
398 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
399 .tcg_features = TCG_EXT3_FEATURES,
401 [FEAT_C000_0001_EDX] = {
402 .feat_names = ext4_feature_name,
403 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
404 .tcg_features = TCG_EXT4_FEATURES,
406 [FEAT_KVM] = {
407 .feat_names = kvm_feature_name,
408 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
409 .tcg_features = TCG_KVM_FEATURES,
411 [FEAT_SVM] = {
412 .feat_names = svm_feature_name,
413 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
414 .tcg_features = TCG_SVM_FEATURES,
416 [FEAT_7_0_EBX] = {
417 .feat_names = cpuid_7_0_ebx_feature_name,
418 .cpuid_eax = 7,
419 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
420 .cpuid_reg = R_EBX,
421 .tcg_features = TCG_7_0_EBX_FEATURES,
423 [FEAT_7_0_ECX] = {
424 .feat_names = cpuid_7_0_ecx_feature_name,
425 .cpuid_eax = 7,
426 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
427 .cpuid_reg = R_ECX,
428 .tcg_features = TCG_7_0_ECX_FEATURES,
430 [FEAT_8000_0007_EDX] = {
431 .feat_names = cpuid_apm_edx_feature_name,
432 .cpuid_eax = 0x80000007,
433 .cpuid_reg = R_EDX,
434 .tcg_features = TCG_APM_FEATURES,
435 .unmigratable_flags = CPUID_APM_INVTSC,
437 [FEAT_XSAVE] = {
438 .feat_names = cpuid_xsave_feature_name,
439 .cpuid_eax = 0xd,
440 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
441 .cpuid_reg = R_EAX,
442 .tcg_features = TCG_XSAVE_FEATURES,
444 [FEAT_6_EAX] = {
445 .feat_names = cpuid_6_feature_name,
446 .cpuid_eax = 6, .cpuid_reg = R_EAX,
447 .tcg_features = TCG_6_EAX_FEATURES,
451 typedef struct X86RegisterInfo32 {
452 /* Name of register */
453 const char *name;
454 /* QAPI enum value register */
455 X86CPURegister32 qapi_enum;
456 } X86RegisterInfo32;
458 #define REGISTER(reg) \
459 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
460 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
461 REGISTER(EAX),
462 REGISTER(ECX),
463 REGISTER(EDX),
464 REGISTER(EBX),
465 REGISTER(ESP),
466 REGISTER(EBP),
467 REGISTER(ESI),
468 REGISTER(EDI),
470 #undef REGISTER
472 const ExtSaveArea x86_ext_save_areas[] = {
473 [XSTATE_YMM_BIT] =
474 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
475 .offset = 0x240, .size = 0x100 },
476 [XSTATE_BNDREGS_BIT] =
477 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
478 .offset = 0x3c0, .size = 0x40 },
479 [XSTATE_BNDCSR_BIT] =
480 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
481 .offset = 0x400, .size = 0x40 },
482 [XSTATE_OPMASK_BIT] =
483 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
484 .offset = 0x440, .size = 0x40 },
485 [XSTATE_ZMM_Hi256_BIT] =
486 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
487 .offset = 0x480, .size = 0x200 },
488 [XSTATE_Hi16_ZMM_BIT] =
489 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
490 .offset = 0x680, .size = 0x400 },
491 [XSTATE_PKRU_BIT] =
492 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
493 .offset = 0xA80, .size = 0x8 },
496 const char *get_register_name_32(unsigned int reg)
498 if (reg >= CPU_NB_REGS32) {
499 return NULL;
501 return x86_reg_info_32[reg].name;
505 * Returns the set of feature flags that are supported and migratable by
506 * QEMU, for a given FeatureWord.
508 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
510 FeatureWordInfo *wi = &feature_word_info[w];
511 uint32_t r = 0;
512 int i;
514 for (i = 0; i < 32; i++) {
515 uint32_t f = 1U << i;
516 /* If the feature name is unknown, it is not supported by QEMU yet */
517 if (!wi->feat_names[i]) {
518 continue;
520 /* Skip features known to QEMU, but explicitly marked as unmigratable */
521 if (wi->unmigratable_flags & f) {
522 continue;
524 r |= f;
526 return r;
529 void host_cpuid(uint32_t function, uint32_t count,
530 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
532 uint32_t vec[4];
534 #ifdef __x86_64__
535 asm volatile("cpuid"
536 : "=a"(vec[0]), "=b"(vec[1]),
537 "=c"(vec[2]), "=d"(vec[3])
538 : "0"(function), "c"(count) : "cc");
539 #elif defined(__i386__)
540 asm volatile("pusha \n\t"
541 "cpuid \n\t"
542 "mov %%eax, 0(%2) \n\t"
543 "mov %%ebx, 4(%2) \n\t"
544 "mov %%ecx, 8(%2) \n\t"
545 "mov %%edx, 12(%2) \n\t"
546 "popa"
547 : : "a"(function), "c"(count), "S"(vec)
548 : "memory", "cc");
549 #else
550 abort();
551 #endif
553 if (eax)
554 *eax = vec[0];
555 if (ebx)
556 *ebx = vec[1];
557 if (ecx)
558 *ecx = vec[2];
559 if (edx)
560 *edx = vec[3];
/* True for NUL-excluded characters that are whitespace or non-printable. */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))

/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    for (;;) {
        if (!*s1 || !*s2 || *s1 != *s2)
            return (*s1 - *s2);
        ++s1, ++s2;
        if (s1 == e1 && s2 == e2)
            return (0);
        else if (s1 == e1)
            return (*s2);
        else if (s2 == e2)
            return (*s1);
    }
}

/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right. Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *p, *q;

    for (q = p = altstr; ; ) {
        while (*p && *p != '|')
            ++p;
        /* an empty alternative matches an empty flag; otherwise compare */
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
            return (0);
        if (!*p)
            return (1);
        else
            q = ++p;
    }
}

/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false.
 * featureset must have one entry per bit of *pval (32 entries).
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    uint32_t mask;
    const char **ppc;
    bool found = false;

    for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc)
        if (*ppc && !altcmp(s, e, *ppc)) {
            *pval |= mask;
            found = true;
        }
    return found;
}
626 static void add_flagname_to_bitmaps(const char *flagname,
627 FeatureWordArray words,
628 Error **errp)
630 FeatureWord w;
631 for (w = 0; w < FEATURE_WORDS; w++) {
632 FeatureWordInfo *wi = &feature_word_info[w];
633 if (wi->feat_names &&
634 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
635 break;
638 if (w == FEATURE_WORDS) {
639 error_setg(errp, "CPU feature %s not found", flagname);
643 /* CPU class name definitions: */
645 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
646 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
648 /* Return type name for a given CPU model name
649 * Caller is responsible for freeing the returned string.
651 static char *x86_cpu_type_name(const char *model_name)
653 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
656 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
658 ObjectClass *oc;
659 char *typename;
661 if (cpu_model == NULL) {
662 return NULL;
665 typename = x86_cpu_type_name(cpu_model);
666 oc = object_class_by_name(typename);
667 g_free(typename);
668 return oc;
671 struct X86CPUDefinition {
672 const char *name;
673 uint32_t level;
674 uint32_t xlevel;
675 uint32_t xlevel2;
676 /* vendor is zero-terminated, 12 character ASCII string */
677 char vendor[CPUID_VENDOR_SZ + 1];
678 int family;
679 int model;
680 int stepping;
681 FeatureWordArray features;
682 char model_id[48];
685 static X86CPUDefinition builtin_x86_defs[] = {
687 .name = "qemu64",
688 .level = 0xd,
689 .vendor = CPUID_VENDOR_AMD,
690 .family = 6,
691 .model = 6,
692 .stepping = 3,
693 .features[FEAT_1_EDX] =
694 PPRO_FEATURES |
695 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
696 CPUID_PSE36,
697 .features[FEAT_1_ECX] =
698 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
699 .features[FEAT_8000_0001_EDX] =
700 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
701 .features[FEAT_8000_0001_ECX] =
702 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
703 .xlevel = 0x8000000A,
706 .name = "phenom",
707 .level = 5,
708 .vendor = CPUID_VENDOR_AMD,
709 .family = 16,
710 .model = 2,
711 .stepping = 3,
712 /* Missing: CPUID_HT */
713 .features[FEAT_1_EDX] =
714 PPRO_FEATURES |
715 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
716 CPUID_PSE36 | CPUID_VME,
717 .features[FEAT_1_ECX] =
718 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
719 CPUID_EXT_POPCNT,
720 .features[FEAT_8000_0001_EDX] =
721 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
722 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
723 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
724 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
725 CPUID_EXT3_CR8LEG,
726 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
727 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
728 .features[FEAT_8000_0001_ECX] =
729 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
730 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
731 /* Missing: CPUID_SVM_LBRV */
732 .features[FEAT_SVM] =
733 CPUID_SVM_NPT,
734 .xlevel = 0x8000001A,
735 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
738 .name = "core2duo",
739 .level = 10,
740 .vendor = CPUID_VENDOR_INTEL,
741 .family = 6,
742 .model = 15,
743 .stepping = 11,
744 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
745 .features[FEAT_1_EDX] =
746 PPRO_FEATURES |
747 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
748 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
749 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
750 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
751 .features[FEAT_1_ECX] =
752 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
753 CPUID_EXT_CX16,
754 .features[FEAT_8000_0001_EDX] =
755 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
756 .features[FEAT_8000_0001_ECX] =
757 CPUID_EXT3_LAHF_LM,
758 .xlevel = 0x80000008,
759 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
762 .name = "kvm64",
763 .level = 0xd,
764 .vendor = CPUID_VENDOR_INTEL,
765 .family = 15,
766 .model = 6,
767 .stepping = 1,
768 /* Missing: CPUID_HT */
769 .features[FEAT_1_EDX] =
770 PPRO_FEATURES | CPUID_VME |
771 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
772 CPUID_PSE36,
773 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
774 .features[FEAT_1_ECX] =
775 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
776 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
777 .features[FEAT_8000_0001_EDX] =
778 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
779 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
780 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
781 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
782 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
783 .features[FEAT_8000_0001_ECX] =
785 .xlevel = 0x80000008,
786 .model_id = "Common KVM processor"
789 .name = "qemu32",
790 .level = 4,
791 .vendor = CPUID_VENDOR_INTEL,
792 .family = 6,
793 .model = 6,
794 .stepping = 3,
795 .features[FEAT_1_EDX] =
796 PPRO_FEATURES,
797 .features[FEAT_1_ECX] =
798 CPUID_EXT_SSE3,
799 .xlevel = 0x80000004,
802 .name = "kvm32",
803 .level = 5,
804 .vendor = CPUID_VENDOR_INTEL,
805 .family = 15,
806 .model = 6,
807 .stepping = 1,
808 .features[FEAT_1_EDX] =
809 PPRO_FEATURES | CPUID_VME |
810 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
811 .features[FEAT_1_ECX] =
812 CPUID_EXT_SSE3,
813 .features[FEAT_8000_0001_ECX] =
815 .xlevel = 0x80000008,
816 .model_id = "Common 32-bit KVM processor"
819 .name = "coreduo",
820 .level = 10,
821 .vendor = CPUID_VENDOR_INTEL,
822 .family = 6,
823 .model = 14,
824 .stepping = 8,
825 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
826 .features[FEAT_1_EDX] =
827 PPRO_FEATURES | CPUID_VME |
828 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
829 CPUID_SS,
830 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
831 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
832 .features[FEAT_1_ECX] =
833 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
834 .features[FEAT_8000_0001_EDX] =
835 CPUID_EXT2_NX,
836 .xlevel = 0x80000008,
837 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
840 .name = "486",
841 .level = 1,
842 .vendor = CPUID_VENDOR_INTEL,
843 .family = 4,
844 .model = 8,
845 .stepping = 0,
846 .features[FEAT_1_EDX] =
847 I486_FEATURES,
848 .xlevel = 0,
851 .name = "pentium",
852 .level = 1,
853 .vendor = CPUID_VENDOR_INTEL,
854 .family = 5,
855 .model = 4,
856 .stepping = 3,
857 .features[FEAT_1_EDX] =
858 PENTIUM_FEATURES,
859 .xlevel = 0,
862 .name = "pentium2",
863 .level = 2,
864 .vendor = CPUID_VENDOR_INTEL,
865 .family = 6,
866 .model = 5,
867 .stepping = 2,
868 .features[FEAT_1_EDX] =
869 PENTIUM2_FEATURES,
870 .xlevel = 0,
873 .name = "pentium3",
874 .level = 3,
875 .vendor = CPUID_VENDOR_INTEL,
876 .family = 6,
877 .model = 7,
878 .stepping = 3,
879 .features[FEAT_1_EDX] =
880 PENTIUM3_FEATURES,
881 .xlevel = 0,
884 .name = "athlon",
885 .level = 2,
886 .vendor = CPUID_VENDOR_AMD,
887 .family = 6,
888 .model = 2,
889 .stepping = 3,
890 .features[FEAT_1_EDX] =
891 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
892 CPUID_MCA,
893 .features[FEAT_8000_0001_EDX] =
894 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
895 .xlevel = 0x80000008,
898 .name = "n270",
899 .level = 10,
900 .vendor = CPUID_VENDOR_INTEL,
901 .family = 6,
902 .model = 28,
903 .stepping = 2,
904 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
905 .features[FEAT_1_EDX] =
906 PPRO_FEATURES |
907 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
908 CPUID_ACPI | CPUID_SS,
909 /* Some CPUs got no CPUID_SEP */
910 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
911 * CPUID_EXT_XTPR */
912 .features[FEAT_1_ECX] =
913 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
914 CPUID_EXT_MOVBE,
915 .features[FEAT_8000_0001_EDX] =
916 CPUID_EXT2_NX,
917 .features[FEAT_8000_0001_ECX] =
918 CPUID_EXT3_LAHF_LM,
919 .xlevel = 0x80000008,
920 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
923 .name = "Conroe",
924 .level = 10,
925 .vendor = CPUID_VENDOR_INTEL,
926 .family = 6,
927 .model = 15,
928 .stepping = 3,
929 .features[FEAT_1_EDX] =
930 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
931 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
932 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
933 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
934 CPUID_DE | CPUID_FP87,
935 .features[FEAT_1_ECX] =
936 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
937 .features[FEAT_8000_0001_EDX] =
938 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
939 .features[FEAT_8000_0001_ECX] =
940 CPUID_EXT3_LAHF_LM,
941 .xlevel = 0x80000008,
942 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
945 .name = "Penryn",
946 .level = 10,
947 .vendor = CPUID_VENDOR_INTEL,
948 .family = 6,
949 .model = 23,
950 .stepping = 3,
951 .features[FEAT_1_EDX] =
952 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
953 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
954 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
955 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
956 CPUID_DE | CPUID_FP87,
957 .features[FEAT_1_ECX] =
958 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
959 CPUID_EXT_SSE3,
960 .features[FEAT_8000_0001_EDX] =
961 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
962 .features[FEAT_8000_0001_ECX] =
963 CPUID_EXT3_LAHF_LM,
964 .xlevel = 0x80000008,
965 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
968 .name = "Nehalem",
969 .level = 11,
970 .vendor = CPUID_VENDOR_INTEL,
971 .family = 6,
972 .model = 26,
973 .stepping = 3,
974 .features[FEAT_1_EDX] =
975 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
976 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
977 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
978 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
979 CPUID_DE | CPUID_FP87,
980 .features[FEAT_1_ECX] =
981 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
982 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
983 .features[FEAT_8000_0001_EDX] =
984 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
985 .features[FEAT_8000_0001_ECX] =
986 CPUID_EXT3_LAHF_LM,
987 .xlevel = 0x80000008,
988 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
991 .name = "Westmere",
992 .level = 11,
993 .vendor = CPUID_VENDOR_INTEL,
994 .family = 6,
995 .model = 44,
996 .stepping = 1,
997 .features[FEAT_1_EDX] =
998 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
999 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1000 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1001 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1002 CPUID_DE | CPUID_FP87,
1003 .features[FEAT_1_ECX] =
1004 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1005 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1006 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1007 .features[FEAT_8000_0001_EDX] =
1008 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1009 .features[FEAT_8000_0001_ECX] =
1010 CPUID_EXT3_LAHF_LM,
1011 .features[FEAT_6_EAX] =
1012 CPUID_6_EAX_ARAT,
1013 .xlevel = 0x80000008,
1014 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1017 .name = "SandyBridge",
1018 .level = 0xd,
1019 .vendor = CPUID_VENDOR_INTEL,
1020 .family = 6,
1021 .model = 42,
1022 .stepping = 1,
1023 .features[FEAT_1_EDX] =
1024 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1025 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1026 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1027 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1028 CPUID_DE | CPUID_FP87,
1029 .features[FEAT_1_ECX] =
1030 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1031 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1032 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1033 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1034 CPUID_EXT_SSE3,
1035 .features[FEAT_8000_0001_EDX] =
1036 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1037 CPUID_EXT2_SYSCALL,
1038 .features[FEAT_8000_0001_ECX] =
1039 CPUID_EXT3_LAHF_LM,
1040 .features[FEAT_XSAVE] =
1041 CPUID_XSAVE_XSAVEOPT,
1042 .features[FEAT_6_EAX] =
1043 CPUID_6_EAX_ARAT,
1044 .xlevel = 0x80000008,
1045 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1048 .name = "IvyBridge",
1049 .level = 0xd,
1050 .vendor = CPUID_VENDOR_INTEL,
1051 .family = 6,
1052 .model = 58,
1053 .stepping = 9,
1054 .features[FEAT_1_EDX] =
1055 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1056 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1057 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1058 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1059 CPUID_DE | CPUID_FP87,
1060 .features[FEAT_1_ECX] =
1061 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1062 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1063 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1064 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1065 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1066 .features[FEAT_7_0_EBX] =
1067 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1068 CPUID_7_0_EBX_ERMS,
1069 .features[FEAT_8000_0001_EDX] =
1070 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1071 CPUID_EXT2_SYSCALL,
1072 .features[FEAT_8000_0001_ECX] =
1073 CPUID_EXT3_LAHF_LM,
1074 .features[FEAT_XSAVE] =
1075 CPUID_XSAVE_XSAVEOPT,
1076 .features[FEAT_6_EAX] =
1077 CPUID_6_EAX_ARAT,
1078 .xlevel = 0x80000008,
1079 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1082 .name = "Haswell-noTSX",
1083 .level = 0xd,
1084 .vendor = CPUID_VENDOR_INTEL,
1085 .family = 6,
1086 .model = 60,
1087 .stepping = 1,
1088 .features[FEAT_1_EDX] =
1089 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1090 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1091 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1092 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1093 CPUID_DE | CPUID_FP87,
1094 .features[FEAT_1_ECX] =
1095 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1096 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1097 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1098 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1099 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1100 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1101 .features[FEAT_8000_0001_EDX] =
1102 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1103 CPUID_EXT2_SYSCALL,
1104 .features[FEAT_8000_0001_ECX] =
1105 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1106 .features[FEAT_7_0_EBX] =
1107 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1108 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1109 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1110 .features[FEAT_XSAVE] =
1111 CPUID_XSAVE_XSAVEOPT,
1112 .features[FEAT_6_EAX] =
1113 CPUID_6_EAX_ARAT,
1114 .xlevel = 0x80000008,
1115 .model_id = "Intel Core Processor (Haswell, no TSX)",
1116 }, {
1117 .name = "Haswell",
1118 .level = 0xd,
1119 .vendor = CPUID_VENDOR_INTEL,
1120 .family = 6,
1121 .model = 60,
1122 .stepping = 1,
1123 .features[FEAT_1_EDX] =
1124 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1125 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1126 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1127 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1128 CPUID_DE | CPUID_FP87,
1129 .features[FEAT_1_ECX] =
1130 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1131 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1132 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1133 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1134 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1135 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1136 .features[FEAT_8000_0001_EDX] =
1137 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1138 CPUID_EXT2_SYSCALL,
1139 .features[FEAT_8000_0001_ECX] =
1140 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1141 .features[FEAT_7_0_EBX] =
1142 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1143 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1144 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1145 CPUID_7_0_EBX_RTM,
1146 .features[FEAT_XSAVE] =
1147 CPUID_XSAVE_XSAVEOPT,
1148 .features[FEAT_6_EAX] =
1149 CPUID_6_EAX_ARAT,
1150 .xlevel = 0x80000008,
1151 .model_id = "Intel Core Processor (Haswell)",
1154 .name = "Broadwell-noTSX",
1155 .level = 0xd,
1156 .vendor = CPUID_VENDOR_INTEL,
1157 .family = 6,
1158 .model = 61,
1159 .stepping = 2,
1160 .features[FEAT_1_EDX] =
1161 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1162 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1163 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1164 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1165 CPUID_DE | CPUID_FP87,
1166 .features[FEAT_1_ECX] =
1167 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1168 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1169 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1170 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1171 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1172 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1173 .features[FEAT_8000_0001_EDX] =
1174 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1175 CPUID_EXT2_SYSCALL,
1176 .features[FEAT_8000_0001_ECX] =
1177 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1178 .features[FEAT_7_0_EBX] =
1179 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1180 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1181 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1182 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1183 CPUID_7_0_EBX_SMAP,
1184 .features[FEAT_XSAVE] =
1185 CPUID_XSAVE_XSAVEOPT,
1186 .features[FEAT_6_EAX] =
1187 CPUID_6_EAX_ARAT,
1188 .xlevel = 0x80000008,
1189 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1192 .name = "Broadwell",
1193 .level = 0xd,
1194 .vendor = CPUID_VENDOR_INTEL,
1195 .family = 6,
1196 .model = 61,
1197 .stepping = 2,
1198 .features[FEAT_1_EDX] =
1199 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1200 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1201 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1202 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1203 CPUID_DE | CPUID_FP87,
1204 .features[FEAT_1_ECX] =
1205 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1206 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1207 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1208 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1209 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1210 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1211 .features[FEAT_8000_0001_EDX] =
1212 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1213 CPUID_EXT2_SYSCALL,
1214 .features[FEAT_8000_0001_ECX] =
1215 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1216 .features[FEAT_7_0_EBX] =
1217 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1218 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1219 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1220 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1221 CPUID_7_0_EBX_SMAP,
1222 .features[FEAT_XSAVE] =
1223 CPUID_XSAVE_XSAVEOPT,
1224 .features[FEAT_6_EAX] =
1225 CPUID_6_EAX_ARAT,
1226 .xlevel = 0x80000008,
1227 .model_id = "Intel Core Processor (Broadwell)",
1230 .name = "Opteron_G1",
1231 .level = 5,
1232 .vendor = CPUID_VENDOR_AMD,
1233 .family = 15,
1234 .model = 6,
1235 .stepping = 1,
1236 .features[FEAT_1_EDX] =
1237 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1238 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1239 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1240 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1241 CPUID_DE | CPUID_FP87,
1242 .features[FEAT_1_ECX] =
1243 CPUID_EXT_SSE3,
1244 .features[FEAT_8000_0001_EDX] =
1245 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1246 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1247 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1248 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1249 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1250 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1251 .xlevel = 0x80000008,
1252 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1255 .name = "Opteron_G2",
1256 .level = 5,
1257 .vendor = CPUID_VENDOR_AMD,
1258 .family = 15,
1259 .model = 6,
1260 .stepping = 1,
1261 .features[FEAT_1_EDX] =
1262 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1263 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1264 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1265 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1266 CPUID_DE | CPUID_FP87,
1267 .features[FEAT_1_ECX] =
1268 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1269 /* Missing: CPUID_EXT2_RDTSCP */
1270 .features[FEAT_8000_0001_EDX] =
1271 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1272 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1273 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1274 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1275 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1276 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1277 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1278 .features[FEAT_8000_0001_ECX] =
1279 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1280 .xlevel = 0x80000008,
1281 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1284 .name = "Opteron_G3",
1285 .level = 5,
1286 .vendor = CPUID_VENDOR_AMD,
1287 .family = 15,
1288 .model = 6,
1289 .stepping = 1,
1290 .features[FEAT_1_EDX] =
1291 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1292 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1293 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1294 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1295 CPUID_DE | CPUID_FP87,
1296 .features[FEAT_1_ECX] =
1297 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1298 CPUID_EXT_SSE3,
1299 /* Missing: CPUID_EXT2_RDTSCP */
1300 .features[FEAT_8000_0001_EDX] =
1301 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1302 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1303 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1304 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1305 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1306 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1307 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1308 .features[FEAT_8000_0001_ECX] =
1309 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1310 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1311 .xlevel = 0x80000008,
1312 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1315 .name = "Opteron_G4",
1316 .level = 0xd,
1317 .vendor = CPUID_VENDOR_AMD,
1318 .family = 21,
1319 .model = 1,
1320 .stepping = 2,
1321 .features[FEAT_1_EDX] =
1322 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1323 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1324 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1325 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1326 CPUID_DE | CPUID_FP87,
1327 .features[FEAT_1_ECX] =
1328 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1329 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1330 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1331 CPUID_EXT_SSE3,
1332 /* Missing: CPUID_EXT2_RDTSCP */
1333 .features[FEAT_8000_0001_EDX] =
1334 CPUID_EXT2_LM |
1335 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1336 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1337 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1338 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1339 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1340 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1341 .features[FEAT_8000_0001_ECX] =
1342 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1343 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1344 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1345 CPUID_EXT3_LAHF_LM,
1346 /* no xsaveopt! */
1347 .xlevel = 0x8000001A,
1348 .model_id = "AMD Opteron 62xx class CPU",
1351 .name = "Opteron_G5",
1352 .level = 0xd,
1353 .vendor = CPUID_VENDOR_AMD,
1354 .family = 21,
1355 .model = 2,
1356 .stepping = 0,
1357 .features[FEAT_1_EDX] =
1358 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1359 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1360 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1361 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1362 CPUID_DE | CPUID_FP87,
1363 .features[FEAT_1_ECX] =
1364 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1365 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1366 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1367 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1368 /* Missing: CPUID_EXT2_RDTSCP */
1369 .features[FEAT_8000_0001_EDX] =
1370 CPUID_EXT2_LM |
1371 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1372 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1373 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1374 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1375 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1376 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1377 .features[FEAT_8000_0001_ECX] =
1378 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1379 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1380 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1381 CPUID_EXT3_LAHF_LM,
1382 /* no xsaveopt! */
1383 .xlevel = 0x8000001A,
1384 .model_id = "AMD Opteron 63xx class CPU",
/* A property name/value pair, used in tables of default property
 * overrides (see kvm_default_props below).
 */
typedef struct PropValue {
    const char *prop;
    const char *value;
} PropValue;
1392 /* KVM-specific features that are automatically added/removed
1393 * from all CPU models when KVM is enabled.
1395 static PropValue kvm_default_props[] = {
1396 { "kvmclock", "on" },
1397 { "kvm-nopiodelay", "on" },
1398 { "kvm-asyncpf", "on" },
1399 { "kvm-steal-time", "on" },
1400 { "kvm-pv-eoi", "on" },
1401 { "kvmclock-stable-bit", "on" },
1402 { "x2apic", "on" },
1403 { "acpi", "off" },
1404 { "monitor", "off" },
1405 { "svm", "off" },
1406 { NULL, NULL },
1409 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1411 PropValue *pv;
1412 for (pv = kvm_default_props; pv->prop; pv++) {
1413 if (!strcmp(pv->prop, prop)) {
1414 pv->value = value;
1415 break;
1419 /* It is valid to call this function only for properties that
1420 * are already present in the kvm_default_props table.
1422 assert(pv->prop);
1425 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1426 bool migratable_only);
1428 #ifdef CONFIG_KVM
/* Fill @str (at least 48 bytes) with the host CPU's model-id string.
 * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the
 * string in EAX/EBX/ECX/EDX. Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &regs[0], &regs[1], &regs[2], &regs[3]);
        /* regs[] is contiguous, so one 16-byte copy equals the four
         * per-register copies of the register images.
         */
        memcpy(str + i * 16, regs, sizeof(regs));
    }
    return 0;
}
1445 static X86CPUDefinition host_cpudef;
1447 static Property host_x86_cpu_properties[] = {
1448 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1449 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1450 DEFINE_PROP_END_OF_LIST()
1453 /* class_init for the "host" CPU model
1455 * This function may be called before KVM is initialized.
1457 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1459 DeviceClass *dc = DEVICE_CLASS(oc);
1460 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1461 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1463 xcc->kvm_required = true;
1465 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1466 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1468 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1469 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1470 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1471 host_cpudef.stepping = eax & 0x0F;
1473 cpu_x86_fill_model_id(host_cpudef.model_id);
1475 xcc->cpu_def = &host_cpudef;
1477 /* level, xlevel, xlevel2, and the feature words are initialized on
1478 * instance_init, because they require KVM to be initialized.
1481 dc->props = host_x86_cpu_properties;
1482 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1483 dc->cannot_destroy_with_object_finalize_yet = true;
1486 static void host_x86_cpu_initfn(Object *obj)
1488 X86CPU *cpu = X86_CPU(obj);
1489 CPUX86State *env = &cpu->env;
1490 KVMState *s = kvm_state;
1492 assert(kvm_enabled());
1494 /* We can't fill the features array here because we don't know yet if
1495 * "migratable" is true or false.
1497 cpu->host_features = true;
1499 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1500 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1501 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1503 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1506 static const TypeInfo host_x86_cpu_type_info = {
1507 .name = X86_CPU_TYPE_NAME("host"),
1508 .parent = TYPE_X86_CPU,
1509 .instance_init = host_x86_cpu_initfn,
1510 .class_init = host_x86_cpu_class_init,
1513 #endif
1515 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1517 FeatureWordInfo *f = &feature_word_info[w];
1518 int i;
1520 for (i = 0; i < 32; ++i) {
1521 if ((1UL << i) & mask) {
1522 const char *reg = get_register_name_32(f->cpuid_reg);
1523 assert(reg);
1524 fprintf(stderr, "warning: %s doesn't support requested feature: "
1525 "CPUID.%02XH:%s%s%s [bit %d]\n",
1526 kvm_enabled() ? "host" : "TCG",
1527 f->cpuid_eax, reg,
1528 f->feat_names[i] ? "." : "",
1529 f->feat_names[i] ? f->feat_names[i] : "", i);
1534 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1535 const char *name, void *opaque,
1536 Error **errp)
1538 X86CPU *cpu = X86_CPU(obj);
1539 CPUX86State *env = &cpu->env;
1540 int64_t value;
1542 value = (env->cpuid_version >> 8) & 0xf;
1543 if (value == 0xf) {
1544 value += (env->cpuid_version >> 20) & 0xff;
1546 visit_type_int(v, name, &value, errp);
1549 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1550 const char *name, void *opaque,
1551 Error **errp)
1553 X86CPU *cpu = X86_CPU(obj);
1554 CPUX86State *env = &cpu->env;
1555 const int64_t min = 0;
1556 const int64_t max = 0xff + 0xf;
1557 Error *local_err = NULL;
1558 int64_t value;
1560 visit_type_int(v, name, &value, &local_err);
1561 if (local_err) {
1562 error_propagate(errp, local_err);
1563 return;
1565 if (value < min || value > max) {
1566 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1567 name ? name : "null", value, min, max);
1568 return;
1571 env->cpuid_version &= ~0xff00f00;
1572 if (value > 0x0f) {
1573 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1574 } else {
1575 env->cpuid_version |= value << 8;
1579 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1580 const char *name, void *opaque,
1581 Error **errp)
1583 X86CPU *cpu = X86_CPU(obj);
1584 CPUX86State *env = &cpu->env;
1585 int64_t value;
1587 value = (env->cpuid_version >> 4) & 0xf;
1588 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1589 visit_type_int(v, name, &value, errp);
1592 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1593 const char *name, void *opaque,
1594 Error **errp)
1596 X86CPU *cpu = X86_CPU(obj);
1597 CPUX86State *env = &cpu->env;
1598 const int64_t min = 0;
1599 const int64_t max = 0xff;
1600 Error *local_err = NULL;
1601 int64_t value;
1603 visit_type_int(v, name, &value, &local_err);
1604 if (local_err) {
1605 error_propagate(errp, local_err);
1606 return;
1608 if (value < min || value > max) {
1609 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1610 name ? name : "null", value, min, max);
1611 return;
1614 env->cpuid_version &= ~0xf00f0;
1615 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1618 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1619 const char *name, void *opaque,
1620 Error **errp)
1622 X86CPU *cpu = X86_CPU(obj);
1623 CPUX86State *env = &cpu->env;
1624 int64_t value;
1626 value = env->cpuid_version & 0xf;
1627 visit_type_int(v, name, &value, errp);
1630 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1631 const char *name, void *opaque,
1632 Error **errp)
1634 X86CPU *cpu = X86_CPU(obj);
1635 CPUX86State *env = &cpu->env;
1636 const int64_t min = 0;
1637 const int64_t max = 0xf;
1638 Error *local_err = NULL;
1639 int64_t value;
1641 visit_type_int(v, name, &value, &local_err);
1642 if (local_err) {
1643 error_propagate(errp, local_err);
1644 return;
1646 if (value < min || value > max) {
1647 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1648 name ? name : "null", value, min, max);
1649 return;
1652 env->cpuid_version &= ~0xf;
1653 env->cpuid_version |= value & 0xf;
1656 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1658 X86CPU *cpu = X86_CPU(obj);
1659 CPUX86State *env = &cpu->env;
1660 char *value;
1662 value = g_malloc(CPUID_VENDOR_SZ + 1);
1663 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1664 env->cpuid_vendor3);
1665 return value;
1668 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1669 Error **errp)
1671 X86CPU *cpu = X86_CPU(obj);
1672 CPUX86State *env = &cpu->env;
1673 int i;
1675 if (strlen(value) != CPUID_VENDOR_SZ) {
1676 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1677 return;
1680 env->cpuid_vendor1 = 0;
1681 env->cpuid_vendor2 = 0;
1682 env->cpuid_vendor3 = 0;
1683 for (i = 0; i < 4; i++) {
1684 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1685 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1686 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1690 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1692 X86CPU *cpu = X86_CPU(obj);
1693 CPUX86State *env = &cpu->env;
1694 char *value;
1695 int i;
1697 value = g_malloc(48 + 1);
1698 for (i = 0; i < 48; i++) {
1699 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1701 value[48] = '\0';
1702 return value;
1705 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1706 Error **errp)
1708 X86CPU *cpu = X86_CPU(obj);
1709 CPUX86State *env = &cpu->env;
1710 int c, len, i;
1712 if (model_id == NULL) {
1713 model_id = "";
1715 len = strlen(model_id);
1716 memset(env->cpuid_model, 0, 48);
1717 for (i = 0; i < 48; i++) {
1718 if (i >= len) {
1719 c = '\0';
1720 } else {
1721 c = (uint8_t)model_id[i];
1723 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1727 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1728 void *opaque, Error **errp)
1730 X86CPU *cpu = X86_CPU(obj);
1731 int64_t value;
1733 value = cpu->env.tsc_khz * 1000;
1734 visit_type_int(v, name, &value, errp);
1737 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1738 void *opaque, Error **errp)
1740 X86CPU *cpu = X86_CPU(obj);
1741 const int64_t min = 0;
1742 const int64_t max = INT64_MAX;
1743 Error *local_err = NULL;
1744 int64_t value;
1746 visit_type_int(v, name, &value, &local_err);
1747 if (local_err) {
1748 error_propagate(errp, local_err);
1749 return;
1751 if (value < min || value > max) {
1752 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1753 name ? name : "null", value, min, max);
1754 return;
1757 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1760 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1761 void *opaque, Error **errp)
1763 X86CPU *cpu = X86_CPU(obj);
1764 int64_t value = cpu->apic_id;
1766 visit_type_int(v, name, &value, errp);
1769 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1770 void *opaque, Error **errp)
1772 X86CPU *cpu = X86_CPU(obj);
1773 DeviceState *dev = DEVICE(obj);
1774 const int64_t min = 0;
1775 const int64_t max = UINT32_MAX;
1776 Error *error = NULL;
1777 int64_t value;
1779 if (dev->realized) {
1780 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1781 "it was realized", name, object_get_typename(obj));
1782 return;
1785 visit_type_int(v, name, &value, &error);
1786 if (error) {
1787 error_propagate(errp, error);
1788 return;
1790 if (value < min || value > max) {
1791 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1792 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1793 object_get_typename(obj), name, value, min, max);
1794 return;
1797 if ((value != cpu->apic_id) && cpu_exists(value)) {
1798 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1799 return;
1801 cpu->apic_id = value;
1804 /* Generic getter for "feature-words" and "filtered-features" properties */
1805 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1806 const char *name, void *opaque,
1807 Error **errp)
1809 uint32_t *array = (uint32_t *)opaque;
1810 FeatureWord w;
1811 Error *err = NULL;
1812 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1813 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1814 X86CPUFeatureWordInfoList *list = NULL;
1816 for (w = 0; w < FEATURE_WORDS; w++) {
1817 FeatureWordInfo *wi = &feature_word_info[w];
1818 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1819 qwi->cpuid_input_eax = wi->cpuid_eax;
1820 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1821 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1822 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1823 qwi->features = array[w];
1825 /* List will be in reverse order, but order shouldn't matter */
1826 list_entries[w].next = list;
1827 list_entries[w].value = &word_infos[w];
1828 list = &list_entries[w];
1831 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
1832 error_propagate(errp, err);
1835 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1836 void *opaque, Error **errp)
1838 X86CPU *cpu = X86_CPU(obj);
1839 int64_t value = cpu->hyperv_spinlock_attempts;
1841 visit_type_int(v, name, &value, errp);
1844 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1845 void *opaque, Error **errp)
1847 const int64_t min = 0xFFF;
1848 const int64_t max = UINT_MAX;
1849 X86CPU *cpu = X86_CPU(obj);
1850 Error *err = NULL;
1851 int64_t value;
1853 visit_type_int(v, name, &value, &err);
1854 if (err) {
1855 error_propagate(errp, err);
1856 return;
1859 if (value < min || value > max) {
1860 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1861 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1862 object_get_typename(obj), name ? name : "null",
1863 value, min, max);
1864 return;
1866 cpu->hyperv_spinlock_attempts = value;
1869 static PropertyInfo qdev_prop_spinlocks = {
1870 .name = "int",
1871 .get = x86_get_hv_spinlocks,
1872 .set = x86_set_hv_spinlocks,
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    for (; *s != '\0'; s++) {
        if (*s == '_') {
            *s = '-';
        }
    }
}
1885 /* Parse "+feature,-feature,feature=foo" CPU feature string
1887 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1888 Error **errp)
1890 X86CPU *cpu = X86_CPU(cs);
1891 char *featurestr; /* Single 'key=value" string being parsed */
1892 FeatureWord w;
1893 /* Features to be added */
1894 FeatureWordArray plus_features = { 0 };
1895 /* Features to be removed */
1896 FeatureWordArray minus_features = { 0 };
1897 uint32_t numvalue;
1898 CPUX86State *env = &cpu->env;
1899 Error *local_err = NULL;
1901 featurestr = features ? strtok(features, ",") : NULL;
1903 while (featurestr) {
1904 char *val;
1905 if (featurestr[0] == '+') {
1906 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1907 } else if (featurestr[0] == '-') {
1908 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1909 } else if ((val = strchr(featurestr, '='))) {
1910 *val = 0; val++;
1911 feat2prop(featurestr);
1912 if (!strcmp(featurestr, "xlevel")) {
1913 char *err;
1914 char num[32];
1916 numvalue = strtoul(val, &err, 0);
1917 if (!*val || *err) {
1918 error_setg(errp, "bad numerical value %s", val);
1919 return;
1921 if (numvalue < 0x80000000) {
1922 error_report("xlevel value shall always be >= 0x80000000"
1923 ", fixup will be removed in future versions");
1924 numvalue += 0x80000000;
1926 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1927 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1928 } else if (!strcmp(featurestr, "tsc-freq")) {
1929 int64_t tsc_freq;
1930 char *err;
1931 char num[32];
1933 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1934 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1935 if (tsc_freq < 0 || *err) {
1936 error_setg(errp, "bad numerical value %s", val);
1937 return;
1939 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1940 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1941 &local_err);
1942 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1943 char *err;
1944 const int min = 0xFFF;
1945 char num[32];
1946 numvalue = strtoul(val, &err, 0);
1947 if (!*val || *err) {
1948 error_setg(errp, "bad numerical value %s", val);
1949 return;
1951 if (numvalue < min) {
1952 error_report("hv-spinlocks value shall always be >= 0x%x"
1953 ", fixup will be removed in future versions",
1954 min);
1955 numvalue = min;
1957 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1958 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1959 } else {
1960 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1962 } else {
1963 feat2prop(featurestr);
1964 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1966 if (local_err) {
1967 error_propagate(errp, local_err);
1968 return;
1970 featurestr = strtok(NULL, ",");
1973 if (cpu->host_features) {
1974 for (w = 0; w < FEATURE_WORDS; w++) {
1975 env->features[w] =
1976 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1980 for (w = 0; w < FEATURE_WORDS; w++) {
1981 env->features[w] |= plus_features[w];
1982 env->features[w] &= ~minus_features[w];
1986 /* Print all cpuid feature names in featureset
1988 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1990 int bit;
1991 bool first = true;
1993 for (bit = 0; bit < 32; bit++) {
1994 if (featureset[bit]) {
1995 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1996 first = false;
2001 /* generate CPU information. */
2002 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2004 X86CPUDefinition *def;
2005 char buf[256];
2006 int i;
2008 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2009 def = &builtin_x86_defs[i];
2010 snprintf(buf, sizeof(buf), "%s", def->name);
2011 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2013 #ifdef CONFIG_KVM
2014 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2015 "KVM processor with all supported host features "
2016 "(only available in KVM mode)");
2017 #endif
2019 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2020 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2021 FeatureWordInfo *fw = &feature_word_info[i];
2023 (*cpu_fprintf)(f, " ");
2024 listflags(f, cpu_fprintf, fw->feat_names);
2025 (*cpu_fprintf)(f, "\n");
2029 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2031 CpuDefinitionInfoList *cpu_list = NULL;
2032 X86CPUDefinition *def;
2033 int i;
2035 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2036 CpuDefinitionInfoList *entry;
2037 CpuDefinitionInfo *info;
2039 def = &builtin_x86_defs[i];
2040 info = g_malloc0(sizeof(*info));
2041 info->name = g_strdup(def->name);
2043 entry = g_malloc0(sizeof(*entry));
2044 entry->value = info;
2045 entry->next = cpu_list;
2046 cpu_list = entry;
2049 return cpu_list;
2052 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2053 bool migratable_only)
2055 FeatureWordInfo *wi = &feature_word_info[w];
2056 uint32_t r;
2058 if (kvm_enabled()) {
2059 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2060 wi->cpuid_ecx,
2061 wi->cpuid_reg);
2062 } else if (tcg_enabled()) {
2063 r = wi->tcg_features;
2064 } else {
2065 return ~0;
2067 if (migratable_only) {
2068 r &= x86_cpu_get_migratable_flags(w);
2070 return r;
2074 * Filters CPU feature words based on host availability of each feature.
2076 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2078 static int x86_cpu_filter_features(X86CPU *cpu)
2080 CPUX86State *env = &cpu->env;
2081 FeatureWord w;
2082 int rv = 0;
2084 for (w = 0; w < FEATURE_WORDS; w++) {
2085 uint32_t host_feat =
2086 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2087 uint32_t requested_features = env->features[w];
2088 env->features[w] &= host_feat;
2089 cpu->filtered_features[w] = requested_features & ~env->features[w];
2090 if (cpu->filtered_features[w]) {
2091 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2092 report_unavailable_features(w, cpu->filtered_features[w]);
2094 rv = 1;
2098 return rv;
2101 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2103 PropValue *pv;
2104 for (pv = props; pv->prop; pv++) {
2105 if (!pv->value) {
2106 continue;
2108 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2109 &error_abort);
2113 /* Load data from X86CPUDefinition
2115 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2117 CPUX86State *env = &cpu->env;
2118 const char *vendor;
2119 char host_vendor[CPUID_VENDOR_SZ + 1];
2120 FeatureWord w;
2122 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2123 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2124 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2125 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2126 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2127 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2128 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2129 for (w = 0; w < FEATURE_WORDS; w++) {
2130 env->features[w] = def->features[w];
2133 /* Special cases not set in the X86CPUDefinition structs: */
2134 if (kvm_enabled()) {
2135 x86_cpu_apply_props(cpu, kvm_default_props);
2138 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2140 /* sysenter isn't supported in compatibility mode on AMD,
2141 * syscall isn't supported in compatibility mode on Intel.
2142 * Normally we advertise the actual CPU vendor, but you can
2143 * override this using the 'vendor' property if you want to use
2144 * KVM's sysenter/syscall emulation in compatibility mode and
2145 * when doing cross vendor migration
2147 vendor = def->vendor;
2148 if (kvm_enabled()) {
2149 uint32_t ebx = 0, ecx = 0, edx = 0;
2150 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2151 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2152 vendor = host_vendor;
2155 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2159 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2161 X86CPU *cpu = NULL;
2162 X86CPUClass *xcc;
2163 ObjectClass *oc;
2164 gchar **model_pieces;
2165 char *name, *features;
2166 Error *error = NULL;
2168 model_pieces = g_strsplit(cpu_model, ",", 2);
2169 if (!model_pieces[0]) {
2170 error_setg(&error, "Invalid/empty CPU model name");
2171 goto out;
2173 name = model_pieces[0];
2174 features = model_pieces[1];
2176 oc = x86_cpu_class_by_name(name);
2177 if (oc == NULL) {
2178 error_setg(&error, "Unable to find CPU definition: %s", name);
2179 goto out;
2181 xcc = X86_CPU_CLASS(oc);
2183 if (xcc->kvm_required && !kvm_enabled()) {
2184 error_setg(&error, "CPU model '%s' requires KVM", name);
2185 goto out;
2188 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2190 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2191 if (error) {
2192 goto out;
2195 out:
2196 if (error != NULL) {
2197 error_propagate(errp, error);
2198 if (cpu) {
2199 object_unref(OBJECT(cpu));
2200 cpu = NULL;
2203 g_strfreev(model_pieces);
2204 return cpu;
2207 X86CPU *cpu_x86_init(const char *cpu_model)
2209 Error *error = NULL;
2210 X86CPU *cpu;
2212 cpu = cpu_x86_create(cpu_model, &error);
2213 if (error) {
2214 goto out;
2217 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2219 out:
2220 if (error) {
2221 error_report_err(error);
2222 if (cpu != NULL) {
2223 object_unref(OBJECT(cpu));
2224 cpu = NULL;
2227 return cpu;
2230 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2232 X86CPUDefinition *cpudef = data;
2233 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2235 xcc->cpu_def = cpudef;
2238 static void x86_register_cpudef_type(X86CPUDefinition *def)
2240 char *typename = x86_cpu_type_name(def->name);
2241 TypeInfo ti = {
2242 .name = typename,
2243 .parent = TYPE_X86_CPU,
2244 .class_init = x86_cpu_cpudef_class_init,
2245 .class_data = def,
2248 type_register(&ti);
2249 g_free(typename);
2252 #if !defined(CONFIG_USER_ONLY)
2254 void cpu_clear_apic_feature(CPUX86State *env)
2256 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2259 #endif /* !CONFIG_USER_ONLY */
2261 /* Initialize list of CPU models, filling some non-static fields if necessary
2263 void x86_cpudef_setup(void)
2265 int i, j;
2266 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2268 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2269 X86CPUDefinition *def = &builtin_x86_defs[i];
2271 /* Look for specific "cpudef" models that */
2272 /* have the QEMU version in .model_id */
2273 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2274 if (strcmp(model_with_versions[j], def->name) == 0) {
2275 pstrcpy(def->model_id, sizeof(def->model_id),
2276 "QEMU Virtual CPU version ");
2277 pstrcat(def->model_id, sizeof(def->model_id),
2278 qemu_hw_version());
2279 break;
2285 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2286 uint32_t *eax, uint32_t *ebx,
2287 uint32_t *ecx, uint32_t *edx)
2289 X86CPU *cpu = x86_env_get_cpu(env);
2290 CPUState *cs = CPU(cpu);
2292 /* test if maximum index reached */
2293 if (index & 0x80000000) {
2294 if (index > env->cpuid_xlevel) {
2295 if (env->cpuid_xlevel2 > 0) {
2296 /* Handle the Centaur's CPUID instruction. */
2297 if (index > env->cpuid_xlevel2) {
2298 index = env->cpuid_xlevel2;
2299 } else if (index < 0xC0000000) {
2300 index = env->cpuid_xlevel;
2302 } else {
2303 /* Intel documentation states that invalid EAX input will
2304 * return the same information as EAX=cpuid_level
2305 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2307 index = env->cpuid_level;
2310 } else {
2311 if (index > env->cpuid_level)
2312 index = env->cpuid_level;
2315 switch(index) {
2316 case 0:
2317 *eax = env->cpuid_level;
2318 *ebx = env->cpuid_vendor1;
2319 *edx = env->cpuid_vendor2;
2320 *ecx = env->cpuid_vendor3;
2321 break;
2322 case 1:
2323 *eax = env->cpuid_version;
2324 *ebx = (cpu->apic_id << 24) |
2325 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2326 *ecx = env->features[FEAT_1_ECX];
2327 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2328 *ecx |= CPUID_EXT_OSXSAVE;
2330 *edx = env->features[FEAT_1_EDX];
2331 if (cs->nr_cores * cs->nr_threads > 1) {
2332 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2333 *edx |= CPUID_HT;
2335 break;
2336 case 2:
2337 /* cache info: needed for Pentium Pro compatibility */
2338 if (cpu->cache_info_passthrough) {
2339 host_cpuid(index, 0, eax, ebx, ecx, edx);
2340 break;
2342 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2343 *ebx = 0;
2344 *ecx = 0;
2345 *edx = (L1D_DESCRIPTOR << 16) | \
2346 (L1I_DESCRIPTOR << 8) | \
2347 (L2_DESCRIPTOR);
2348 break;
2349 case 4:
2350 /* cache info: needed for Core compatibility */
2351 if (cpu->cache_info_passthrough) {
2352 host_cpuid(index, count, eax, ebx, ecx, edx);
2353 *eax &= ~0xFC000000;
2354 } else {
2355 *eax = 0;
2356 switch (count) {
2357 case 0: /* L1 dcache info */
2358 *eax |= CPUID_4_TYPE_DCACHE | \
2359 CPUID_4_LEVEL(1) | \
2360 CPUID_4_SELF_INIT_LEVEL;
2361 *ebx = (L1D_LINE_SIZE - 1) | \
2362 ((L1D_PARTITIONS - 1) << 12) | \
2363 ((L1D_ASSOCIATIVITY - 1) << 22);
2364 *ecx = L1D_SETS - 1;
2365 *edx = CPUID_4_NO_INVD_SHARING;
2366 break;
2367 case 1: /* L1 icache info */
2368 *eax |= CPUID_4_TYPE_ICACHE | \
2369 CPUID_4_LEVEL(1) | \
2370 CPUID_4_SELF_INIT_LEVEL;
2371 *ebx = (L1I_LINE_SIZE - 1) | \
2372 ((L1I_PARTITIONS - 1) << 12) | \
2373 ((L1I_ASSOCIATIVITY - 1) << 22);
2374 *ecx = L1I_SETS - 1;
2375 *edx = CPUID_4_NO_INVD_SHARING;
2376 break;
2377 case 2: /* L2 cache info */
2378 *eax |= CPUID_4_TYPE_UNIFIED | \
2379 CPUID_4_LEVEL(2) | \
2380 CPUID_4_SELF_INIT_LEVEL;
2381 if (cs->nr_threads > 1) {
2382 *eax |= (cs->nr_threads - 1) << 14;
2384 *ebx = (L2_LINE_SIZE - 1) | \
2385 ((L2_PARTITIONS - 1) << 12) | \
2386 ((L2_ASSOCIATIVITY - 1) << 22);
2387 *ecx = L2_SETS - 1;
2388 *edx = CPUID_4_NO_INVD_SHARING;
2389 break;
2390 default: /* end of info */
2391 *eax = 0;
2392 *ebx = 0;
2393 *ecx = 0;
2394 *edx = 0;
2395 break;
2399 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2400 if ((*eax & 31) && cs->nr_cores > 1) {
2401 *eax |= (cs->nr_cores - 1) << 26;
2403 break;
2404 case 5:
2405 /* mwait info: needed for Core compatibility */
2406 *eax = 0; /* Smallest monitor-line size in bytes */
2407 *ebx = 0; /* Largest monitor-line size in bytes */
2408 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2409 *edx = 0;
2410 break;
2411 case 6:
2412 /* Thermal and Power Leaf */
2413 *eax = env->features[FEAT_6_EAX];
2414 *ebx = 0;
2415 *ecx = 0;
2416 *edx = 0;
2417 break;
2418 case 7:
2419 /* Structured Extended Feature Flags Enumeration Leaf */
2420 if (count == 0) {
2421 *eax = 0; /* Maximum ECX value for sub-leaves */
2422 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2423 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2424 *edx = 0; /* Reserved */
2425 } else {
2426 *eax = 0;
2427 *ebx = 0;
2428 *ecx = 0;
2429 *edx = 0;
2431 break;
2432 case 9:
2433 /* Direct Cache Access Information Leaf */
2434 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2435 *ebx = 0;
2436 *ecx = 0;
2437 *edx = 0;
2438 break;
2439 case 0xA:
2440 /* Architectural Performance Monitoring Leaf */
2441 if (kvm_enabled() && cpu->enable_pmu) {
2442 KVMState *s = cs->kvm_state;
2444 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2445 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2446 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2447 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2448 } else {
2449 *eax = 0;
2450 *ebx = 0;
2451 *ecx = 0;
2452 *edx = 0;
2454 break;
2455 case 0xD: {
2456 KVMState *s = cs->kvm_state;
2457 uint64_t ena_mask;
2458 int i;
2460 /* Processor Extended State */
2461 *eax = 0;
2462 *ebx = 0;
2463 *ecx = 0;
2464 *edx = 0;
2465 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2466 break;
2468 if (kvm_enabled()) {
2469 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2470 ena_mask <<= 32;
2471 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2472 } else {
2473 ena_mask = -1;
2476 if (count == 0) {
2477 *ecx = 0x240;
2478 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2479 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2480 if ((env->features[esa->feature] & esa->bits) == esa->bits
2481 && ((ena_mask >> i) & 1) != 0) {
2482 if (i < 32) {
2483 *eax |= 1u << i;
2484 } else {
2485 *edx |= 1u << (i - 32);
2487 *ecx = MAX(*ecx, esa->offset + esa->size);
2490 *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2491 *ebx = *ecx;
2492 } else if (count == 1) {
2493 *eax = env->features[FEAT_XSAVE];
2494 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2495 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2496 if ((env->features[esa->feature] & esa->bits) == esa->bits
2497 && ((ena_mask >> count) & 1) != 0) {
2498 *eax = esa->size;
2499 *ebx = esa->offset;
2502 break;
2504 case 0x80000000:
2505 *eax = env->cpuid_xlevel;
2506 *ebx = env->cpuid_vendor1;
2507 *edx = env->cpuid_vendor2;
2508 *ecx = env->cpuid_vendor3;
2509 break;
2510 case 0x80000001:
2511 *eax = env->cpuid_version;
2512 *ebx = 0;
2513 *ecx = env->features[FEAT_8000_0001_ECX];
2514 *edx = env->features[FEAT_8000_0001_EDX];
2516 /* The Linux kernel checks for the CMPLegacy bit and
2517 * discards multiple thread information if it is set.
2518 * So dont set it here for Intel to make Linux guests happy.
2520 if (cs->nr_cores * cs->nr_threads > 1) {
2521 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2522 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2523 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2524 *ecx |= 1 << 1; /* CmpLegacy bit */
2527 break;
2528 case 0x80000002:
2529 case 0x80000003:
2530 case 0x80000004:
2531 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2532 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2533 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2534 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2535 break;
2536 case 0x80000005:
2537 /* cache info (L1 cache) */
2538 if (cpu->cache_info_passthrough) {
2539 host_cpuid(index, 0, eax, ebx, ecx, edx);
2540 break;
2542 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2543 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2544 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2545 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2546 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2547 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2548 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2549 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2550 break;
2551 case 0x80000006:
2552 /* cache info (L2 cache) */
2553 if (cpu->cache_info_passthrough) {
2554 host_cpuid(index, 0, eax, ebx, ecx, edx);
2555 break;
2557 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2558 (L2_DTLB_2M_ENTRIES << 16) | \
2559 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2560 (L2_ITLB_2M_ENTRIES);
2561 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2562 (L2_DTLB_4K_ENTRIES << 16) | \
2563 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2564 (L2_ITLB_4K_ENTRIES);
2565 *ecx = (L2_SIZE_KB_AMD << 16) | \
2566 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2567 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2568 *edx = ((L3_SIZE_KB/512) << 18) | \
2569 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2570 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2571 break;
2572 case 0x80000007:
2573 *eax = 0;
2574 *ebx = 0;
2575 *ecx = 0;
2576 *edx = env->features[FEAT_8000_0007_EDX];
2577 break;
2578 case 0x80000008:
2579 /* virtual & phys address size in low 2 bytes. */
2580 /* XXX: This value must match the one used in the MMU code. */
2581 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2582 /* 64 bit processor */
2583 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2584 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2585 } else {
2586 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2587 *eax = 0x00000024; /* 36 bits physical */
2588 } else {
2589 *eax = 0x00000020; /* 32 bits physical */
2592 *ebx = 0;
2593 *ecx = 0;
2594 *edx = 0;
2595 if (cs->nr_cores * cs->nr_threads > 1) {
2596 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2598 break;
2599 case 0x8000000A:
2600 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2601 *eax = 0x00000001; /* SVM Revision */
2602 *ebx = 0x00000010; /* nr of ASIDs */
2603 *ecx = 0;
2604 *edx = env->features[FEAT_SVM]; /* optional features */
2605 } else {
2606 *eax = 0;
2607 *ebx = 0;
2608 *ecx = 0;
2609 *edx = 0;
2611 break;
2612 case 0xC0000000:
2613 *eax = env->cpuid_xlevel2;
2614 *ebx = 0;
2615 *ecx = 0;
2616 *edx = 0;
2617 break;
2618 case 0xC0000001:
2619 /* Support for VIA CPU's CPUID instruction */
2620 *eax = env->cpuid_version;
2621 *ebx = 0;
2622 *ecx = 0;
2623 *edx = env->features[FEAT_C000_0001_EDX];
2624 break;
2625 case 0xC0000002:
2626 case 0xC0000003:
2627 case 0xC0000004:
2628 /* Reserved for the future, and now filled with zero */
2629 *eax = 0;
2630 *ebx = 0;
2631 *ecx = 0;
2632 *edx = 0;
2633 break;
2634 default:
2635 /* reserved values: zero */
2636 *eax = 0;
2637 *ebx = 0;
2638 *ecx = 0;
2639 *edx = 0;
2640 break;
2644 /* CPUClass::reset() */
2645 static void x86_cpu_reset(CPUState *s)
2647 X86CPU *cpu = X86_CPU(s);
2648 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2649 CPUX86State *env = &cpu->env;
2650 target_ulong cr4;
2651 uint64_t xcr0;
2652 int i;
2654 xcc->parent_reset(s);
2656 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2658 tlb_flush(s, 1);
2660 env->old_exception = -1;
2662 /* init to reset state */
2664 #ifdef CONFIG_SOFTMMU
2665 env->hflags |= HF_SOFTMMU_MASK;
2666 #endif
2667 env->hflags2 |= HF2_GIF_MASK;
2669 cpu_x86_update_cr0(env, 0x60000010);
2670 env->a20_mask = ~0x0;
2671 env->smbase = 0x30000;
2673 env->idt.limit = 0xffff;
2674 env->gdt.limit = 0xffff;
2675 env->ldt.limit = 0xffff;
2676 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2677 env->tr.limit = 0xffff;
2678 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2680 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2681 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2682 DESC_R_MASK | DESC_A_MASK);
2683 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2684 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2685 DESC_A_MASK);
2686 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2687 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2688 DESC_A_MASK);
2689 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2690 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2691 DESC_A_MASK);
2692 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2693 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2694 DESC_A_MASK);
2695 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2696 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2697 DESC_A_MASK);
2699 env->eip = 0xfff0;
2700 env->regs[R_EDX] = env->cpuid_version;
2702 env->eflags = 0x2;
2704 /* FPU init */
2705 for (i = 0; i < 8; i++) {
2706 env->fptags[i] = 1;
2708 cpu_set_fpuc(env, 0x37f);
2710 env->mxcsr = 0x1f80;
2711 /* All units are in INIT state. */
2712 env->xstate_bv = 0;
2714 env->pat = 0x0007040600070406ULL;
2715 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2717 memset(env->dr, 0, sizeof(env->dr));
2718 env->dr[6] = DR6_FIXED_1;
2719 env->dr[7] = DR7_FIXED_1;
2720 cpu_breakpoint_remove_all(s, BP_CPU);
2721 cpu_watchpoint_remove_all(s, BP_CPU);
2723 cr4 = 0;
2724 xcr0 = XSTATE_FP_MASK;
2726 #ifdef CONFIG_USER_ONLY
2727 /* Enable all the features for user-mode. */
2728 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2729 xcr0 |= XSTATE_SSE_MASK;
2731 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_MPX) {
2732 xcr0 |= XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK;
2734 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2735 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2737 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2738 cr4 |= CR4_FSGSBASE_MASK;
2740 #endif
2742 env->xcr0 = xcr0;
2743 cpu_x86_update_cr4(env, cr4);
2746 * SDM 11.11.5 requires:
2747 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2748 * - IA32_MTRR_PHYSMASKn.V = 0
2749 * All other bits are undefined. For simplification, zero it all.
2751 env->mtrr_deftype = 0;
2752 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2753 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2755 #if !defined(CONFIG_USER_ONLY)
2756 /* We hard-wire the BSP to the first CPU. */
2757 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2759 s->halted = !cpu_is_bsp(cpu);
2761 if (kvm_enabled()) {
2762 kvm_arch_reset_vcpu(cpu);
2764 #endif
2767 #ifndef CONFIG_USER_ONLY
2768 bool cpu_is_bsp(X86CPU *cpu)
2770 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2773 /* TODO: remove me, when reset over QOM tree is implemented */
2774 static void x86_cpu_machine_reset_cb(void *opaque)
2776 X86CPU *cpu = opaque;
2777 cpu_reset(CPU(cpu));
2779 #endif
2781 static void mce_init(X86CPU *cpu)
2783 CPUX86State *cenv = &cpu->env;
2784 unsigned int bank;
2786 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2787 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2788 (CPUID_MCE | CPUID_MCA)) {
2789 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2790 cenv->mcg_ctl = ~(uint64_t)0;
2791 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2792 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2797 #ifndef CONFIG_USER_ONLY
2798 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2800 APICCommonState *apic;
2801 const char *apic_type = "apic";
2803 if (kvm_apic_in_kernel()) {
2804 apic_type = "kvm-apic";
2805 } else if (xen_enabled()) {
2806 apic_type = "xen-apic";
2809 cpu->apic_state = DEVICE(object_new(apic_type));
2811 object_property_add_child(OBJECT(cpu), "apic",
2812 OBJECT(cpu->apic_state), NULL);
2813 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2814 /* TODO: convert to link<> */
2815 apic = APIC_COMMON(cpu->apic_state);
2816 apic->cpu = cpu;
2817 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2820 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2822 APICCommonState *apic;
2823 static bool apic_mmio_map_once;
2825 if (cpu->apic_state == NULL) {
2826 return;
2828 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2829 errp);
2831 /* Map APIC MMIO area */
2832 apic = APIC_COMMON(cpu->apic_state);
2833 if (!apic_mmio_map_once) {
2834 memory_region_add_subregion_overlap(get_system_memory(),
2835 apic->apicbase &
2836 MSR_IA32_APICBASE_BASE,
2837 &apic->io_memory,
2838 0x1000);
2839 apic_mmio_map_once = true;
2843 static void x86_cpu_machine_done(Notifier *n, void *unused)
2845 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2846 MemoryRegion *smram =
2847 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2849 if (smram) {
2850 cpu->smram = g_new(MemoryRegion, 1);
2851 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2852 smram, 0, 1ull << 32);
2853 memory_region_set_enabled(cpu->smram, false);
2854 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2857 #else
2858 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2861 #endif
2864 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2865 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2866 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2867 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2868 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2869 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2870 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2872 CPUState *cs = CPU(dev);
2873 X86CPU *cpu = X86_CPU(dev);
2874 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2875 CPUX86State *env = &cpu->env;
2876 Error *local_err = NULL;
2877 static bool ht_warned;
2879 if (cpu->apic_id < 0) {
2880 error_setg(errp, "apic-id property was not initialized properly");
2881 return;
2884 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2885 env->cpuid_level = 7;
2888 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2889 * CPUID[1].EDX.
2891 if (IS_AMD_CPU(env)) {
2892 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2893 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2894 & CPUID_EXT2_AMD_ALIASES);
2898 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2899 error_setg(&local_err,
2900 kvm_enabled() ?
2901 "Host doesn't support requested features" :
2902 "TCG doesn't support requested features");
2903 goto out;
2906 #ifndef CONFIG_USER_ONLY
2907 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2909 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2910 x86_cpu_apic_create(cpu, &local_err);
2911 if (local_err != NULL) {
2912 goto out;
2915 #endif
2917 mce_init(cpu);
2919 #ifndef CONFIG_USER_ONLY
2920 if (tcg_enabled()) {
2921 AddressSpace *newas = g_new(AddressSpace, 1);
2923 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2924 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2926 /* Outer container... */
2927 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2928 memory_region_set_enabled(cpu->cpu_as_root, true);
2930 /* ... with two regions inside: normal system memory with low
2931 * priority, and...
2933 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2934 get_system_memory(), 0, ~0ull);
2935 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2936 memory_region_set_enabled(cpu->cpu_as_mem, true);
2937 address_space_init(newas, cpu->cpu_as_root, "CPU");
2938 cs->num_ases = 1;
2939 cpu_address_space_init(cs, newas, 0);
2941 /* ... SMRAM with higher priority, linked from /machine/smram. */
2942 cpu->machine_done.notify = x86_cpu_machine_done;
2943 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2945 #endif
2947 qemu_init_vcpu(cs);
2949 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2950 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2951 * based on inputs (sockets,cores,threads), it is still better to gives
2952 * users a warning.
2954 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2955 * cs->nr_threads hasn't be populated yet and the checking is incorrect.
2957 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2958 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2959 " -smp options properly.");
2960 ht_warned = true;
2963 x86_cpu_apic_realize(cpu, &local_err);
2964 if (local_err != NULL) {
2965 goto out;
2967 cpu_reset(cs);
2969 xcc->parent_realize(dev, &local_err);
2971 out:
2972 if (local_err != NULL) {
2973 error_propagate(errp, local_err);
2974 return;
2978 typedef struct BitProperty {
2979 uint32_t *ptr;
2980 uint32_t mask;
2981 } BitProperty;
2983 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
2984 void *opaque, Error **errp)
2986 BitProperty *fp = opaque;
2987 bool value = (*fp->ptr & fp->mask) == fp->mask;
2988 visit_type_bool(v, name, &value, errp);
2991 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
2992 void *opaque, Error **errp)
2994 DeviceState *dev = DEVICE(obj);
2995 BitProperty *fp = opaque;
2996 Error *local_err = NULL;
2997 bool value;
2999 if (dev->realized) {
3000 qdev_prop_set_after_realize(dev, name, errp);
3001 return;
3004 visit_type_bool(v, name, &value, &local_err);
3005 if (local_err) {
3006 error_propagate(errp, local_err);
3007 return;
3010 if (value) {
3011 *fp->ptr |= fp->mask;
3012 } else {
3013 *fp->ptr &= ~fp->mask;
3017 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3018 void *opaque)
3020 BitProperty *prop = opaque;
3021 g_free(prop);
3024 /* Register a boolean property to get/set a single bit in a uint32_t field.
3026 * The same property name can be registered multiple times to make it affect
3027 * multiple bits in the same FeatureWord. In that case, the getter will return
3028 * true only if all bits are set.
3030 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3031 const char *prop_name,
3032 uint32_t *field,
3033 int bitnr)
3035 BitProperty *fp;
3036 ObjectProperty *op;
3037 uint32_t mask = (1UL << bitnr);
3039 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3040 if (op) {
3041 fp = op->opaque;
3042 assert(fp->ptr == field);
3043 fp->mask |= mask;
3044 } else {
3045 fp = g_new0(BitProperty, 1);
3046 fp->ptr = field;
3047 fp->mask = mask;
3048 object_property_add(OBJECT(cpu), prop_name, "bool",
3049 x86_cpu_get_bit_prop,
3050 x86_cpu_set_bit_prop,
3051 x86_cpu_release_bit_prop, fp, &error_abort);
3055 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3056 FeatureWord w,
3057 int bitnr)
3059 Object *obj = OBJECT(cpu);
3060 int i;
3061 char **names;
3062 FeatureWordInfo *fi = &feature_word_info[w];
3064 if (!fi->feat_names) {
3065 return;
3067 if (!fi->feat_names[bitnr]) {
3068 return;
3071 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3073 feat2prop(names[0]);
3074 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3076 for (i = 1; names[i]; i++) {
3077 feat2prop(names[i]);
3078 object_property_add_alias(obj, names[i], obj, names[0],
3079 &error_abort);
3082 g_strfreev(names);
3085 static void x86_cpu_initfn(Object *obj)
3087 CPUState *cs = CPU(obj);
3088 X86CPU *cpu = X86_CPU(obj);
3089 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3090 CPUX86State *env = &cpu->env;
3091 FeatureWord w;
3092 static int inited;
3094 cs->env_ptr = env;
3095 cpu_exec_init(cs, &error_abort);
3097 object_property_add(obj, "family", "int",
3098 x86_cpuid_version_get_family,
3099 x86_cpuid_version_set_family, NULL, NULL, NULL);
3100 object_property_add(obj, "model", "int",
3101 x86_cpuid_version_get_model,
3102 x86_cpuid_version_set_model, NULL, NULL, NULL);
3103 object_property_add(obj, "stepping", "int",
3104 x86_cpuid_version_get_stepping,
3105 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3106 object_property_add_str(obj, "vendor",
3107 x86_cpuid_get_vendor,
3108 x86_cpuid_set_vendor, NULL);
3109 object_property_add_str(obj, "model-id",
3110 x86_cpuid_get_model_id,
3111 x86_cpuid_set_model_id, NULL);
3112 object_property_add(obj, "tsc-frequency", "int",
3113 x86_cpuid_get_tsc_freq,
3114 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3115 object_property_add(obj, "apic-id", "int",
3116 x86_cpuid_get_apic_id,
3117 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3118 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3119 x86_cpu_get_feature_words,
3120 NULL, NULL, (void *)env->features, NULL);
3121 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3122 x86_cpu_get_feature_words,
3123 NULL, NULL, (void *)cpu->filtered_features, NULL);
3125 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3127 #ifndef CONFIG_USER_ONLY
3128 /* Any code creating new X86CPU objects have to set apic-id explicitly */
3129 cpu->apic_id = -1;
3130 #endif
3132 for (w = 0; w < FEATURE_WORDS; w++) {
3133 int bitnr;
3135 for (bitnr = 0; bitnr < 32; bitnr++) {
3136 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3140 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3142 /* init various static tables used in TCG mode */
3143 if (tcg_enabled() && !inited) {
3144 inited = 1;
3145 tcg_x86_init();
3149 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3151 X86CPU *cpu = X86_CPU(cs);
3153 return cpu->apic_id;
3156 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3158 X86CPU *cpu = X86_CPU(cs);
3160 return cpu->env.cr[0] & CR0_PG_MASK;
3163 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3165 X86CPU *cpu = X86_CPU(cs);
3167 cpu->env.eip = value;
3170 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3172 X86CPU *cpu = X86_CPU(cs);
3174 cpu->env.eip = tb->pc - tb->cs_base;
3177 static bool x86_cpu_has_work(CPUState *cs)
3179 X86CPU *cpu = X86_CPU(cs);
3180 CPUX86State *env = &cpu->env;
3182 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3183 CPU_INTERRUPT_POLL)) &&
3184 (env->eflags & IF_MASK)) ||
3185 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3186 CPU_INTERRUPT_INIT |
3187 CPU_INTERRUPT_SIPI |
3188 CPU_INTERRUPT_MCE)) ||
3189 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3190 !(env->hflags & HF_SMM_MASK));
/*
 * qdev properties of the X86CPU device.  These back the user-visible
 * "-cpu model,prop=value" options.
 */
static Property x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Custom property type: accepts either a count or "0x..." raw value. */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    /* Hyper-V enlightenments exposed to Windows guests: */
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* "check" warns about, "enforce" refuses, features the host lacks. */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    /* Expose the KVM paravirt CPUID leaves to the guest. */
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    /* Overrides for the maximum basic/extended CPUID leaves. */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_END_OF_LIST()
};
/*
 * Class init for the abstract TYPE_X86_CPU: wire up the DeviceClass and
 * CPUClass hooks shared by every x86 CPU model.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain realize/reset so the x86 hooks run around the parent's. */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System emulation only: memory mapping, crash-dump notes, migration. */
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    /*
     * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
     * object in cpus -> dangling pointer after final object_unref().
     */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
/*
 * QOM registration of the abstract base type for all x86 CPU models;
 * concrete models are registered per-cpudef elsewhere in this file.
 */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,   /* only concrete cpudef subtypes are instantiable */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3276 static void x86_cpu_register_types(void)
3278 int i;
3280 type_register_static(&x86_cpu_type_info);
3281 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3282 x86_register_cpudef_type(&builtin_x86_defs[i]);
3284 #ifdef CONFIG_KVM
3285 type_register_static(&host_x86_cpu_type_info);
3286 #endif
3289 type_init(x86_cpu_register_types)