Merge remote-tracking branch 'qemu/master'
[qemu/ar7.git] / target-i386 / cpu.c
blob2fffae014eeb708ae1aa85e8750ea22f08d217cc
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu-common.h"
21 #include "cpu.h"
22 #include "sysemu/kvm.h"
23 #include "sysemu/cpus.h"
24 #include "kvm_i386.h"
26 #include "qemu/error-report.h"
27 #include "qemu/option.h"
28 #include "qemu/config-file.h"
29 #include "qapi/qmp/qerror.h"
31 #include "qapi-types.h"
32 #include "qapi-visit.h"
33 #include "qapi/visitor.h"
34 #include "sysemu/arch_init.h"
36 #include "hw/hw.h"
37 #if defined(CONFIG_KVM)
38 #include <linux/kvm_para.h>
39 #endif
41 #include "sysemu/sysemu.h"
42 #include "hw/qdev-properties.h"
43 #include "hw/cpu/icc_bus.h"
44 #ifndef CONFIG_USER_ONLY
45 #include "exec/address-spaces.h"
46 #include "hw/xen/xen.h"
47 #include "hw/i386/apic_internal.h"
48 #endif
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d


/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

#define CPUID_4_LEVEL(l)          ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a way count to the 4-bit field value; unsupported way counts
 * map to 0 (invalid).  NOTE: 'a' is evaluated repeatedly, so only pass
 * side-effect-free expressions.
 */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* No L3 cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
162 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
163 uint32_t vendor2, uint32_t vendor3)
165 int i;
166 for (i = 0; i < 4; i++) {
167 dst[i] = vendor1 >> (8 * i);
168 dst[i + 4] = vendor2 >> (8 * i);
169 dst[i + 8] = vendor3 >> (8 * i);
171 dst[CPUID_VENDOR_SZ] = '\0';
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
/* CPUID[1].EDX feature bit names, one entry per bit (NULL = reserved). */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* CPUID[1].ECX feature bit names; '|'-separated entries are aliases. */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};
/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};
/* CPUID[8000_0001].ECX (AMD extended) feature bit names. */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[C000_0001].EDX (VIA/Centaur PadLock) feature bit names. */
static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* KVM paravirt feature bit names (CPUID leaf KVM_CPUID_FEATURES, EAX).
 * Bits 0 and 3 are both "kvmclock" (old and new clocksource MSR interfaces).
 */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[8000_000A].EDX (AMD SVM) feature bit names. */
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean",  "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[EAX=7,ECX=0].EBX feature bit names. */
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
    "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
    NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};
/* CPUID[8000_0007].EDX (APM) feature bit names; only invtsc is known. */
static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[EAX=0xd,ECX=1].EAX (XSAVE sub-leaf) feature bit names. */
static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[6].EAX (thermal/power) feature bit names; only arat is known. */
static const char *cpuid_6_feature_name[] = {
    NULL, NULL, "arat", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Feature masks for the builtin CPU models and for what TCG can emulate. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
          CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
          CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
          /* missing:
          CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
/* Describes one word of CPU feature flags: the bit-name table, which CPUID
 * leaf/register it comes from, and TCG/migration support masks.
 */
typedef struct FeatureWordInfo {
    const char **feat_names;     /* 32-entry bit-name table (NULL = unknown) */
    uint32_t cpuid_eax;          /* Input EAX for CPUID */
    bool cpuid_needs_ecx;        /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;          /* Input ECX value for CPUID */
    int cpuid_reg;               /* output register (R_* constant) */
    uint32_t tcg_features;       /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
365 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
366 [FEAT_1_EDX] = {
367 .feat_names = feature_name,
368 .cpuid_eax = 1, .cpuid_reg = R_EDX,
369 .tcg_features = TCG_FEATURES,
371 [FEAT_1_ECX] = {
372 .feat_names = ext_feature_name,
373 .cpuid_eax = 1, .cpuid_reg = R_ECX,
374 .tcg_features = TCG_EXT_FEATURES,
376 [FEAT_8000_0001_EDX] = {
377 .feat_names = ext2_feature_name,
378 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
379 .tcg_features = TCG_EXT2_FEATURES,
381 [FEAT_8000_0001_ECX] = {
382 .feat_names = ext3_feature_name,
383 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
384 .tcg_features = TCG_EXT3_FEATURES,
386 [FEAT_C000_0001_EDX] = {
387 .feat_names = ext4_feature_name,
388 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
389 .tcg_features = TCG_EXT4_FEATURES,
391 [FEAT_KVM] = {
392 .feat_names = kvm_feature_name,
393 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
394 .tcg_features = TCG_KVM_FEATURES,
396 [FEAT_SVM] = {
397 .feat_names = svm_feature_name,
398 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
399 .tcg_features = TCG_SVM_FEATURES,
401 [FEAT_7_0_EBX] = {
402 .feat_names = cpuid_7_0_ebx_feature_name,
403 .cpuid_eax = 7,
404 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
405 .cpuid_reg = R_EBX,
406 .tcg_features = TCG_7_0_EBX_FEATURES,
408 [FEAT_8000_0007_EDX] = {
409 .feat_names = cpuid_apm_edx_feature_name,
410 .cpuid_eax = 0x80000007,
411 .cpuid_reg = R_EDX,
412 .tcg_features = TCG_APM_FEATURES,
413 .unmigratable_flags = CPUID_APM_INVTSC,
415 [FEAT_XSAVE] = {
416 .feat_names = cpuid_xsave_feature_name,
417 .cpuid_eax = 0xd,
418 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
419 .cpuid_reg = R_EAX,
420 .tcg_features = 0,
422 [FEAT_6_EAX] = {
423 .feat_names = cpuid_6_feature_name,
424 .cpuid_eax = 6, .cpuid_reg = R_EAX,
425 .tcg_features = TCG_6_EAX_FEATURES,
429 typedef struct X86RegisterInfo32 {
430 /* Name of register */
431 const char *name;
432 /* QAPI enum value register */
433 X86CPURegister32 qapi_enum;
434 } X86RegisterInfo32;
436 #define REGISTER(reg) \
437 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
438 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
439 REGISTER(EAX),
440 REGISTER(ECX),
441 REGISTER(EDX),
442 REGISTER(EBX),
443 REGISTER(ESP),
444 REGISTER(EBP),
445 REGISTER(ESI),
446 REGISTER(EDI),
448 #undef REGISTER
/* One XSAVE extended state component: the feature flag that enables it and
 * its offset/size within the XSAVE area.
 */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;
455 static const ExtSaveArea ext_save_areas[] = {
456 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
457 .offset = 0x240, .size = 0x100 },
458 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
459 .offset = 0x3c0, .size = 0x40 },
460 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
461 .offset = 0x400, .size = 0x40 },
462 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
463 .offset = 0x440, .size = 0x40 },
464 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
465 .offset = 0x480, .size = 0x200 },
466 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
467 .offset = 0x680, .size = 0x400 },
470 const char *get_register_name_32(unsigned int reg)
472 if (reg >= CPU_NB_REGS32) {
473 return NULL;
475 return x86_reg_info_32[reg].name;
478 /* KVM-specific features that are automatically added to all CPU models
479 * when KVM is enabled.
481 static uint32_t kvm_default_features[FEATURE_WORDS] = {
482 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
483 (1 << KVM_FEATURE_NOP_IO_DELAY) |
484 (1 << KVM_FEATURE_CLOCKSOURCE2) |
485 (1 << KVM_FEATURE_ASYNC_PF) |
486 (1 << KVM_FEATURE_STEAL_TIME) |
487 (1 << KVM_FEATURE_PV_EOI) |
488 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
489 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
492 /* Features that are not added by default to any CPU model when KVM is enabled.
494 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
495 [FEAT_1_EDX] = CPUID_ACPI,
496 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
497 [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
500 void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
502 kvm_default_features[w] &= ~features;
505 void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
507 kvm_default_unset_features[w] &= ~features;
511 * Returns the set of feature flags that are supported and migratable by
512 * QEMU, for a given FeatureWord.
514 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
516 FeatureWordInfo *wi = &feature_word_info[w];
517 uint32_t r = 0;
518 int i;
520 for (i = 0; i < 32; i++) {
521 uint32_t f = 1U << i;
522 /* If the feature name is unknown, it is not supported by QEMU yet */
523 if (!wi->feat_names[i]) {
524 continue;
526 /* Skip features known to QEMU, but explicitly marked as unmigratable */
527 if (wi->unmigratable_flags & f) {
528 continue;
530 r |= f;
532 return r;
535 void host_cpuid(uint32_t function, uint32_t count,
536 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
538 uint32_t vec[4];
540 #ifdef __x86_64__
541 asm volatile("cpuid"
542 : "=a"(vec[0]), "=b"(vec[1]),
543 "=c"(vec[2]), "=d"(vec[3])
544 : "0"(function), "c"(count) : "cc");
545 #elif defined(__i386__)
546 asm volatile("pusha \n\t"
547 "cpuid \n\t"
548 "mov %%eax, 0(%2) \n\t"
549 "mov %%ebx, 4(%2) \n\t"
550 "mov %%ecx, 8(%2) \n\t"
551 "mov %%edx, 12(%2) \n\t"
552 "popa"
553 : : "a"(function), "c"(count), "S"(vec)
554 : "memory", "cc");
555 #else
556 abort();
557 #endif
559 if (eax)
560 *eax = vec[0];
561 if (ebx)
562 *ebx = vec[1];
563 if (ecx)
564 *ecx = vec[2];
565 if (edx)
566 *edx = vec[3];
/* True for non-NUL characters that are whitespace or non-printable. */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))

/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    while (1) {
        if (!*s1 || !*s2 || *s1 != *s2) {
            return *s1 - *s2;
        }
        ++s1;
        ++s2;
        if (s1 == e1 && s2 == e2) {
            return 0;
        }
        if (s1 == e1) {
            return *s2;
        }
        if (s2 == e2) {
            return *s1;
        }
    }
}
/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right. Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *p, *q;

    for (q = p = altstr; ; ) {
        /* Advance p to the end of the current alternative. */
        while (*p && *p != '|') {
            ++p;
        }
        /* Empty alternative matches empty input; otherwise compare. */
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p))) {
            return 0;
        }
        if (!*p) {
            return 1;
        }
        q = ++p;
    }
}
/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    uint32_t mask;
    const char **name;
    bool found = false;

    /* Walk all 32 bit positions; mask goes to 0 after the last shift. */
    for (mask = 1, name = featureset; mask; mask <<= 1, ++name) {
        if (*name && !altcmp(s, e, *name)) {
            *pval |= mask;
            found = true;
        }
    }
    return found;
}
632 static void add_flagname_to_bitmaps(const char *flagname,
633 FeatureWordArray words,
634 Error **errp)
636 FeatureWord w;
637 for (w = 0; w < FEATURE_WORDS; w++) {
638 FeatureWordInfo *wi = &feature_word_info[w];
639 if (wi->feat_names &&
640 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
641 break;
644 if (w == FEATURE_WORDS) {
645 error_setg(errp, "CPU feature %s not found", flagname);
649 /* CPU class name definitions: */
651 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
652 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
654 /* Return type name for a given CPU model name
655 * Caller is responsible for freeing the returned string.
657 static char *x86_cpu_type_name(const char *model_name)
659 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
662 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
664 ObjectClass *oc;
665 char *typename;
667 if (cpu_model == NULL) {
668 return NULL;
671 typename = x86_cpu_type_name(cpu_model);
672 oc = object_class_by_name(typename);
673 g_free(typename);
674 return oc;
677 struct X86CPUDefinition {
678 const char *name;
679 uint32_t level;
680 uint32_t xlevel;
681 uint32_t xlevel2;
682 /* vendor is zero-terminated, 12 character ASCII string */
683 char vendor[CPUID_VENDOR_SZ + 1];
684 int family;
685 int model;
686 int stepping;
687 FeatureWordArray features;
688 char model_id[48];
689 bool cache_info_passthrough;
692 static X86CPUDefinition builtin_x86_defs[] = {
694 .name = "qemu64",
695 .level = 0xd,
696 .vendor = CPUID_VENDOR_AMD,
697 .family = 6,
698 .model = 6,
699 .stepping = 3,
700 .features[FEAT_1_EDX] =
701 PPRO_FEATURES |
702 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
703 CPUID_PSE36,
704 .features[FEAT_1_ECX] =
705 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
706 .features[FEAT_8000_0001_EDX] =
707 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
708 .features[FEAT_8000_0001_ECX] =
709 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
710 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
711 .xlevel = 0x8000000A,
714 .name = "phenom",
715 .level = 5,
716 .vendor = CPUID_VENDOR_AMD,
717 .family = 16,
718 .model = 2,
719 .stepping = 3,
720 /* Missing: CPUID_HT */
721 .features[FEAT_1_EDX] =
722 PPRO_FEATURES |
723 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
724 CPUID_PSE36 | CPUID_VME,
725 .features[FEAT_1_ECX] =
726 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
727 CPUID_EXT_POPCNT,
728 .features[FEAT_8000_0001_EDX] =
729 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
730 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
731 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
732 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
733 CPUID_EXT3_CR8LEG,
734 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
735 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
736 .features[FEAT_8000_0001_ECX] =
737 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
738 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
739 /* Missing: CPUID_SVM_LBRV */
740 .features[FEAT_SVM] =
741 CPUID_SVM_NPT,
742 .xlevel = 0x8000001A,
743 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
746 .name = "core2duo",
747 .level = 10,
748 .vendor = CPUID_VENDOR_INTEL,
749 .family = 6,
750 .model = 15,
751 .stepping = 11,
752 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
753 .features[FEAT_1_EDX] =
754 PPRO_FEATURES |
755 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
756 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
757 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
758 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
759 .features[FEAT_1_ECX] =
760 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
761 CPUID_EXT_CX16,
762 .features[FEAT_8000_0001_EDX] =
763 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
764 .features[FEAT_8000_0001_ECX] =
765 CPUID_EXT3_LAHF_LM,
766 .xlevel = 0x80000008,
767 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
770 .name = "kvm64",
771 .level = 0xd,
772 .vendor = CPUID_VENDOR_INTEL,
773 .family = 15,
774 .model = 6,
775 .stepping = 1,
776 /* Missing: CPUID_HT */
777 .features[FEAT_1_EDX] =
778 PPRO_FEATURES | CPUID_VME |
779 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
780 CPUID_PSE36,
781 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
782 .features[FEAT_1_ECX] =
783 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
784 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
785 .features[FEAT_8000_0001_EDX] =
786 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
787 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
788 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
789 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
790 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
791 .features[FEAT_8000_0001_ECX] =
793 .xlevel = 0x80000008,
794 .model_id = "Common KVM processor"
797 .name = "qemu32",
798 .level = 4,
799 .vendor = CPUID_VENDOR_INTEL,
800 .family = 6,
801 .model = 6,
802 .stepping = 3,
803 .features[FEAT_1_EDX] =
804 PPRO_FEATURES,
805 .features[FEAT_1_ECX] =
806 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
807 .xlevel = 0x80000004,
810 .name = "kvm32",
811 .level = 5,
812 .vendor = CPUID_VENDOR_INTEL,
813 .family = 15,
814 .model = 6,
815 .stepping = 1,
816 .features[FEAT_1_EDX] =
817 PPRO_FEATURES | CPUID_VME |
818 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
819 .features[FEAT_1_ECX] =
820 CPUID_EXT_SSE3,
821 .features[FEAT_8000_0001_ECX] =
823 .xlevel = 0x80000008,
824 .model_id = "Common 32-bit KVM processor"
827 .name = "coreduo",
828 .level = 10,
829 .vendor = CPUID_VENDOR_INTEL,
830 .family = 6,
831 .model = 14,
832 .stepping = 8,
833 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
834 .features[FEAT_1_EDX] =
835 PPRO_FEATURES | CPUID_VME |
836 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
837 CPUID_SS,
838 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
839 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
840 .features[FEAT_1_ECX] =
841 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
842 .features[FEAT_8000_0001_EDX] =
843 CPUID_EXT2_NX,
844 .xlevel = 0x80000008,
845 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
848 .name = "486",
849 .level = 1,
850 .vendor = CPUID_VENDOR_INTEL,
851 .family = 4,
852 .model = 8,
853 .stepping = 0,
854 .features[FEAT_1_EDX] =
855 I486_FEATURES,
856 .xlevel = 0,
859 .name = "pentium",
860 .level = 1,
861 .vendor = CPUID_VENDOR_INTEL,
862 .family = 5,
863 .model = 4,
864 .stepping = 3,
865 .features[FEAT_1_EDX] =
866 PENTIUM_FEATURES,
867 .xlevel = 0,
870 .name = "pentium2",
871 .level = 2,
872 .vendor = CPUID_VENDOR_INTEL,
873 .family = 6,
874 .model = 5,
875 .stepping = 2,
876 .features[FEAT_1_EDX] =
877 PENTIUM2_FEATURES,
878 .xlevel = 0,
881 .name = "pentium3",
882 .level = 3,
883 .vendor = CPUID_VENDOR_INTEL,
884 .family = 6,
885 .model = 7,
886 .stepping = 3,
887 .features[FEAT_1_EDX] =
888 PENTIUM3_FEATURES,
889 .xlevel = 0,
892 .name = "athlon",
893 .level = 2,
894 .vendor = CPUID_VENDOR_AMD,
895 .family = 6,
896 .model = 2,
897 .stepping = 3,
898 .features[FEAT_1_EDX] =
899 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
900 CPUID_MCA,
901 .features[FEAT_8000_0001_EDX] =
902 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
903 .xlevel = 0x80000008,
906 .name = "n270",
907 .level = 10,
908 .vendor = CPUID_VENDOR_INTEL,
909 .family = 6,
910 .model = 28,
911 .stepping = 2,
912 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
913 .features[FEAT_1_EDX] =
914 PPRO_FEATURES |
915 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
916 CPUID_ACPI | CPUID_SS,
917 /* Some CPUs got no CPUID_SEP */
918 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
919 * CPUID_EXT_XTPR */
920 .features[FEAT_1_ECX] =
921 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
922 CPUID_EXT_MOVBE,
923 .features[FEAT_8000_0001_EDX] =
924 CPUID_EXT2_NX,
925 .features[FEAT_8000_0001_ECX] =
926 CPUID_EXT3_LAHF_LM,
927 .xlevel = 0x80000008,
928 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
931 .name = "Conroe",
932 .level = 10,
933 .vendor = CPUID_VENDOR_INTEL,
934 .family = 6,
935 .model = 15,
936 .stepping = 3,
937 .features[FEAT_1_EDX] =
938 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
939 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
940 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
941 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
942 CPUID_DE | CPUID_FP87,
943 .features[FEAT_1_ECX] =
944 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
945 .features[FEAT_8000_0001_EDX] =
946 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
947 .features[FEAT_8000_0001_ECX] =
948 CPUID_EXT3_LAHF_LM,
949 .xlevel = 0x80000008,
950 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
953 .name = "Penryn",
954 .level = 10,
955 .vendor = CPUID_VENDOR_INTEL,
956 .family = 6,
957 .model = 23,
958 .stepping = 3,
959 .features[FEAT_1_EDX] =
960 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
961 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
962 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
963 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
964 CPUID_DE | CPUID_FP87,
965 .features[FEAT_1_ECX] =
966 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
967 CPUID_EXT_SSE3,
968 .features[FEAT_8000_0001_EDX] =
969 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
970 .features[FEAT_8000_0001_ECX] =
971 CPUID_EXT3_LAHF_LM,
972 .xlevel = 0x80000008,
973 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
976 .name = "Nehalem",
977 .level = 11,
978 .vendor = CPUID_VENDOR_INTEL,
979 .family = 6,
980 .model = 26,
981 .stepping = 3,
982 .features[FEAT_1_EDX] =
983 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
984 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
985 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
986 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
987 CPUID_DE | CPUID_FP87,
988 .features[FEAT_1_ECX] =
989 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
990 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
991 .features[FEAT_8000_0001_EDX] =
992 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
993 .features[FEAT_8000_0001_ECX] =
994 CPUID_EXT3_LAHF_LM,
995 .xlevel = 0x80000008,
996 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
999 .name = "Westmere",
1000 .level = 11,
1001 .vendor = CPUID_VENDOR_INTEL,
1002 .family = 6,
1003 .model = 44,
1004 .stepping = 1,
1005 .features[FEAT_1_EDX] =
1006 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1007 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1008 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1009 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1010 CPUID_DE | CPUID_FP87,
1011 .features[FEAT_1_ECX] =
1012 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1013 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1014 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1015 .features[FEAT_8000_0001_EDX] =
1016 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1017 .features[FEAT_8000_0001_ECX] =
1018 CPUID_EXT3_LAHF_LM,
1019 .features[FEAT_6_EAX] =
1020 CPUID_6_EAX_ARAT,
1021 .xlevel = 0x80000008,
1022 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1025 .name = "SandyBridge",
1026 .level = 0xd,
1027 .vendor = CPUID_VENDOR_INTEL,
1028 .family = 6,
1029 .model = 42,
1030 .stepping = 1,
1031 .features[FEAT_1_EDX] =
1032 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1033 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1034 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1035 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1036 CPUID_DE | CPUID_FP87,
1037 .features[FEAT_1_ECX] =
1038 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1039 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1040 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1041 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1042 CPUID_EXT_SSE3,
1043 .features[FEAT_8000_0001_EDX] =
1044 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1045 CPUID_EXT2_SYSCALL,
1046 .features[FEAT_8000_0001_ECX] =
1047 CPUID_EXT3_LAHF_LM,
1048 .features[FEAT_XSAVE] =
1049 CPUID_XSAVE_XSAVEOPT,
1050 .features[FEAT_6_EAX] =
1051 CPUID_6_EAX_ARAT,
1052 .xlevel = 0x80000008,
1053 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1056 .name = "IvyBridge",
1057 .level = 0xd,
1058 .vendor = CPUID_VENDOR_INTEL,
1059 .family = 6,
1060 .model = 58,
1061 .stepping = 9,
1062 .features[FEAT_1_EDX] =
1063 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1064 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1065 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1066 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1067 CPUID_DE | CPUID_FP87,
1068 .features[FEAT_1_ECX] =
1069 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1070 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1071 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1072 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1073 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1074 .features[FEAT_7_0_EBX] =
1075 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1076 CPUID_7_0_EBX_ERMS,
1077 .features[FEAT_8000_0001_EDX] =
1078 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1079 CPUID_EXT2_SYSCALL,
1080 .features[FEAT_8000_0001_ECX] =
1081 CPUID_EXT3_LAHF_LM,
1082 .features[FEAT_XSAVE] =
1083 CPUID_XSAVE_XSAVEOPT,
1084 .features[FEAT_6_EAX] =
1085 CPUID_6_EAX_ARAT,
1086 .xlevel = 0x80000008,
1087 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1090 .name = "Haswell-noTSX",
1091 .level = 0xd,
1092 .vendor = CPUID_VENDOR_INTEL,
1093 .family = 6,
1094 .model = 60,
1095 .stepping = 1,
1096 .features[FEAT_1_EDX] =
1097 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1098 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1099 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1100 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1101 CPUID_DE | CPUID_FP87,
1102 .features[FEAT_1_ECX] =
1103 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1104 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1105 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1106 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1107 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1108 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1109 .features[FEAT_8000_0001_EDX] =
1110 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1111 CPUID_EXT2_SYSCALL,
1112 .features[FEAT_8000_0001_ECX] =
1113 CPUID_EXT3_LAHF_LM,
1114 .features[FEAT_7_0_EBX] =
1115 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1116 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1117 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1118 .features[FEAT_XSAVE] =
1119 CPUID_XSAVE_XSAVEOPT,
1120 .features[FEAT_6_EAX] =
1121 CPUID_6_EAX_ARAT,
1122 .xlevel = 0x80000008,
1123 .model_id = "Intel Core Processor (Haswell, no TSX)",
1124 }, {
1125 .name = "Haswell",
1126 .level = 0xd,
1127 .vendor = CPUID_VENDOR_INTEL,
1128 .family = 6,
1129 .model = 60,
1130 .stepping = 1,
1131 .features[FEAT_1_EDX] =
1132 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1133 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1134 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1135 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1136 CPUID_DE | CPUID_FP87,
1137 .features[FEAT_1_ECX] =
1138 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1139 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1140 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1141 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1142 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1143 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1144 .features[FEAT_8000_0001_EDX] =
1145 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1146 CPUID_EXT2_SYSCALL,
1147 .features[FEAT_8000_0001_ECX] =
1148 CPUID_EXT3_LAHF_LM,
1149 .features[FEAT_7_0_EBX] =
1150 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1151 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1152 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1153 CPUID_7_0_EBX_RTM,
1154 .features[FEAT_XSAVE] =
1155 CPUID_XSAVE_XSAVEOPT,
1156 .features[FEAT_6_EAX] =
1157 CPUID_6_EAX_ARAT,
1158 .xlevel = 0x80000008,
1159 .model_id = "Intel Core Processor (Haswell)",
1162 .name = "Broadwell-noTSX",
1163 .level = 0xd,
1164 .vendor = CPUID_VENDOR_INTEL,
1165 .family = 6,
1166 .model = 61,
1167 .stepping = 2,
1168 .features[FEAT_1_EDX] =
1169 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1170 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1171 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1172 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1173 CPUID_DE | CPUID_FP87,
1174 .features[FEAT_1_ECX] =
1175 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1176 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1177 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1178 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1179 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1180 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1181 .features[FEAT_8000_0001_EDX] =
1182 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1183 CPUID_EXT2_SYSCALL,
1184 .features[FEAT_8000_0001_ECX] =
1185 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1186 .features[FEAT_7_0_EBX] =
1187 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1188 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1189 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1190 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1191 CPUID_7_0_EBX_SMAP,
1192 .features[FEAT_XSAVE] =
1193 CPUID_XSAVE_XSAVEOPT,
1194 .features[FEAT_6_EAX] =
1195 CPUID_6_EAX_ARAT,
1196 .xlevel = 0x80000008,
1197 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1200 .name = "Broadwell",
1201 .level = 0xd,
1202 .vendor = CPUID_VENDOR_INTEL,
1203 .family = 6,
1204 .model = 61,
1205 .stepping = 2,
1206 .features[FEAT_1_EDX] =
1207 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1208 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1209 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1210 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1211 CPUID_DE | CPUID_FP87,
1212 .features[FEAT_1_ECX] =
1213 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1214 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1215 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1216 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1217 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1218 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1219 .features[FEAT_8000_0001_EDX] =
1220 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1221 CPUID_EXT2_SYSCALL,
1222 .features[FEAT_8000_0001_ECX] =
1223 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1224 .features[FEAT_7_0_EBX] =
1225 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1226 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1227 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1228 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1229 CPUID_7_0_EBX_SMAP,
1230 .features[FEAT_XSAVE] =
1231 CPUID_XSAVE_XSAVEOPT,
1232 .features[FEAT_6_EAX] =
1233 CPUID_6_EAX_ARAT,
1234 .xlevel = 0x80000008,
1235 .model_id = "Intel Core Processor (Broadwell)",
1238 .name = "Opteron_G1",
1239 .level = 5,
1240 .vendor = CPUID_VENDOR_AMD,
1241 .family = 15,
1242 .model = 6,
1243 .stepping = 1,
1244 .features[FEAT_1_EDX] =
1245 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1246 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1247 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1248 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1249 CPUID_DE | CPUID_FP87,
1250 .features[FEAT_1_ECX] =
1251 CPUID_EXT_SSE3,
1252 .features[FEAT_8000_0001_EDX] =
1253 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1254 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1255 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1256 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1257 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1258 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1259 .xlevel = 0x80000008,
1260 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1263 .name = "Opteron_G2",
1264 .level = 5,
1265 .vendor = CPUID_VENDOR_AMD,
1266 .family = 15,
1267 .model = 6,
1268 .stepping = 1,
1269 .features[FEAT_1_EDX] =
1270 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1271 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1272 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1273 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1274 CPUID_DE | CPUID_FP87,
1275 .features[FEAT_1_ECX] =
1276 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1277 .features[FEAT_8000_0001_EDX] =
1278 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1279 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1280 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1281 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1282 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1283 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1284 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1285 .features[FEAT_8000_0001_ECX] =
1286 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1287 .xlevel = 0x80000008,
1288 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1291 .name = "Opteron_G3",
1292 .level = 5,
1293 .vendor = CPUID_VENDOR_AMD,
1294 .family = 15,
1295 .model = 6,
1296 .stepping = 1,
1297 .features[FEAT_1_EDX] =
1298 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1299 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1300 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1301 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1302 CPUID_DE | CPUID_FP87,
1303 .features[FEAT_1_ECX] =
1304 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1305 CPUID_EXT_SSE3,
1306 .features[FEAT_8000_0001_EDX] =
1307 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1308 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1309 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1310 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1311 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1312 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1313 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1314 .features[FEAT_8000_0001_ECX] =
1315 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1316 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1317 .xlevel = 0x80000008,
1318 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1321 .name = "Opteron_G4",
1322 .level = 0xd,
1323 .vendor = CPUID_VENDOR_AMD,
1324 .family = 21,
1325 .model = 1,
1326 .stepping = 2,
1327 .features[FEAT_1_EDX] =
1328 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1329 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1330 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1331 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1332 CPUID_DE | CPUID_FP87,
1333 .features[FEAT_1_ECX] =
1334 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1335 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1336 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1337 CPUID_EXT_SSE3,
1338 .features[FEAT_8000_0001_EDX] =
1339 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1340 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1341 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1342 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1343 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1344 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1345 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1346 .features[FEAT_8000_0001_ECX] =
1347 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1348 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1349 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1350 CPUID_EXT3_LAHF_LM,
1351 /* no xsaveopt! */
1352 .xlevel = 0x8000001A,
1353 .model_id = "AMD Opteron 62xx class CPU",
1356 .name = "Opteron_G5",
1357 .level = 0xd,
1358 .vendor = CPUID_VENDOR_AMD,
1359 .family = 21,
1360 .model = 2,
1361 .stepping = 0,
1362 .features[FEAT_1_EDX] =
1363 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1364 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1365 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1366 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1367 CPUID_DE | CPUID_FP87,
1368 .features[FEAT_1_ECX] =
1369 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1370 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1371 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1372 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1373 .features[FEAT_8000_0001_EDX] =
1374 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1375 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1376 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1377 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1378 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1379 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1380 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1381 .features[FEAT_8000_0001_ECX] =
1382 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1383 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1384 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1385 CPUID_EXT3_LAHF_LM,
1386 /* no xsaveopt! */
1387 .xlevel = 0x8000001A,
1388 .model_id = "AMD Opteron 63xx class CPU",
1392 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1393 bool migratable_only);
1395 #ifdef CONFIG_KVM
/* Fill @str (at least 48 bytes) with the host CPU model-ID string,
 * read from CPUID leaves 0x80000002..0x80000004 (EAX..EDX, 16 bytes
 * per leaf).  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* The four registers are laid out consecutively in the string */
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
1412 static X86CPUDefinition host_cpudef;
/* QOM properties of the KVM-only "host" CPU model.
 * "migratable" (default true): when true, only host features that are
 * known to be safe for live migration are enabled. */
static Property host_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_END_OF_LIST()
};
/* class_init for the "host" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    /* "host" is only usable with KVM enabled */
    xcc->kvm_required = true;

    /* CPUID leaf 0: vendor string is spread across EBX/EDX/ECX (in that
     * order) */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* CPUID leaf 1 EAX: decode family/model/stepping, folding in the
     * extended family/model fields */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;
    host_cpudef.cache_info_passthrough = true;

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
}
/* instance_init for the "host" CPU model: query KVM for the maximum
 * supported CPUID levels.  Requires KVM to be up (asserted below). */
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    assert(kvm_enabled());

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* Basic, extended (0x8000xxxx) and Centaur (0xC000xxxx) level limits */
    env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
    env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
    env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

    /* PMU passthrough is enabled by default for -cpu host */
    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
/* QOM type registration for the "host" CPU model (KVM only). */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
1478 #endif
1480 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1482 FeatureWordInfo *f = &feature_word_info[w];
1483 int i;
1485 for (i = 0; i < 32; ++i) {
1486 if (1 << i & mask) {
1487 const char *reg = get_register_name_32(f->cpuid_reg);
1488 assert(reg);
1489 fprintf(stderr, "warning: %s doesn't support requested feature: "
1490 "CPUID.%02XH:%s%s%s [bit %d]\n",
1491 kvm_enabled() ? "host" : "TCG",
1492 f->cpuid_eax, reg,
1493 f->feat_names[i] ? "." : "",
1494 f->feat_names[i] ? f->feat_names[i] : "", i);
1499 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1500 const char *name, Error **errp)
1502 X86CPU *cpu = X86_CPU(obj);
1503 CPUX86State *env = &cpu->env;
1504 int64_t value;
1506 value = (env->cpuid_version >> 8) & 0xf;
1507 if (value == 0xf) {
1508 value += (env->cpuid_version >> 20) & 0xff;
1510 visit_type_int(v, &value, name, errp);
/* QOM setter for "family": encode a family number into CPUID[1].EAX.
 * Values above 0xf spill into the extended-family field [27:20]. */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
                                         const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;  /* max extended + max base family */
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear base family [11:8] and extended family [27:20] */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        /* Base family holds 0xf; the remainder goes in extended family */
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
1542 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1543 const char *name, Error **errp)
1545 X86CPU *cpu = X86_CPU(obj);
1546 CPUX86State *env = &cpu->env;
1547 int64_t value;
1549 value = (env->cpuid_version >> 4) & 0xf;
1550 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1551 visit_type_int(v, &value, name, errp);
/* QOM setter for "model": split an 8-bit model number into the base
 * model [7:4] and extended model [19:16] fields of CPUID[1].EAX. */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
                                        const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear both model fields, then write low nibble to [7:4] and high
     * nibble to [19:16] */
    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
1579 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1580 void *opaque, const char *name,
1581 Error **errp)
1583 X86CPU *cpu = X86_CPU(obj);
1584 CPUX86State *env = &cpu->env;
1585 int64_t value;
1587 value = env->cpuid_version & 0xf;
1588 visit_type_int(v, &value, name, errp);
/* QOM setter for "stepping": write bits [3:0] of CPUID[1].EAX. */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           void *opaque, const char *name,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
1617 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1619 X86CPU *cpu = X86_CPU(obj);
1620 CPUX86State *env = &cpu->env;
1621 char *value;
1623 value = g_malloc(CPUID_VENDOR_SZ + 1);
1624 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1625 env->cpuid_vendor3);
1626 return value;
1629 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1630 Error **errp)
1632 X86CPU *cpu = X86_CPU(obj);
1633 CPUX86State *env = &cpu->env;
1634 int i;
1636 if (strlen(value) != CPUID_VENDOR_SZ) {
1637 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1638 return;
1641 env->cpuid_vendor1 = 0;
1642 env->cpuid_vendor2 = 0;
1643 env->cpuid_vendor3 = 0;
1644 for (i = 0; i < 4; i++) {
1645 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1646 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1647 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1651 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1653 X86CPU *cpu = X86_CPU(obj);
1654 CPUX86State *env = &cpu->env;
1655 char *value;
1656 int i;
1658 value = g_malloc(48 + 1);
1659 for (i = 0; i < 48; i++) {
1660 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1662 value[48] = '\0';
1663 return value;
1666 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1667 Error **errp)
1669 X86CPU *cpu = X86_CPU(obj);
1670 CPUX86State *env = &cpu->env;
1671 int c, len, i;
1673 if (model_id == NULL) {
1674 model_id = "";
1676 len = strlen(model_id);
1677 memset(env->cpuid_model, 0, 48);
1678 for (i = 0; i < 48; i++) {
1679 if (i >= len) {
1680 c = '\0';
1681 } else {
1682 c = (uint8_t)model_id[i];
1684 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1688 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1689 const char *name, Error **errp)
1691 X86CPU *cpu = X86_CPU(obj);
1692 int64_t value;
1694 value = cpu->env.tsc_khz * 1000;
1695 visit_type_int(v, &value, name, errp);
/* QOM setter for "tsc-frequency": accept a frequency in Hz. */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Stored internally in kHz; sub-kHz precision is dropped */
    cpu->env.tsc_khz = value / 1000;
}
1721 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1722 const char *name, Error **errp)
1724 X86CPU *cpu = X86_CPU(obj);
1725 int64_t value = cpu->apic_id;
1727 visit_type_int(v, &value, name, errp);
/* QOM setter for "apic-id".  Rejected once the device is realized, and
 * rejected if another CPU already uses the requested APIC ID. */
static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
                                  const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    DeviceState *dev = DEVICE(obj);
    const int64_t min = 0;
    const int64_t max = UINT32_MAX;
    Error *error = NULL;
    int64_t value;

    /* The APIC ID is fixed at realize time */
    if (dev->realized) {
        error_setg(errp, "Attempt to set property '%s' on '%s' after "
                   "it was realized", name, object_get_typename(obj));
        return;
    }

    visit_type_int(v, &value, name, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
                   object_get_typename(obj), name, value, min, max);
        return;
    }

    /* Setting the same ID again is allowed; a clash with another CPU is not */
    if ((value != cpu->apic_id) && cpu_exists(value)) {
        error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
        return;
    }
    cpu->apic_id = value;
}
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
                                      const char *name, Error **errp)
{
    /* @opaque points at the FEATURE_WORDS-sized uint32_t array to expose */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    Error *err = NULL;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    /* The list nodes are stack-allocated; the visitor consumes them here */
    visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
    error_propagate(errp, err);
}
1795 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1796 const char *name, Error **errp)
1798 X86CPU *cpu = X86_CPU(obj);
1799 int64_t value = cpu->hyperv_spinlock_attempts;
1801 visit_type_int(v, &value, name, errp);
/* QOM setter for "hv-spinlocks": number of spinlock retry attempts
 * advertised to a Hyper-V guest; minimum value is 0xFFF. */
static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
                                 const char *name, Error **errp)
{
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}
/* qdev property type for "hv-spinlocks", backed by the range-checked
 * getter/setter pair above. */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = strchr(s, '_'); p != NULL; p = strchr(p, '_')) {
        *p = '-';
    }
}
/* Parse "+feature,-feature,feature=foo" CPU feature string
 */
static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
                                     Error **errp)
{
    X86CPU *cpu = X86_CPU(cs);
    char *featurestr; /* Single 'key=value" string being parsed */
    FeatureWord w;
    /* Features to be added */
    FeatureWordArray plus_features = { 0 };
    /* Features to be removed */
    FeatureWordArray minus_features = { 0 };
    uint32_t numvalue;
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;

    /* NOTE: strtok() mutates @features in place */
    featurestr = features ? strtok(features, ",") : NULL;

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            /* "+flag": request the feature bit */
            add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
        } else if (featurestr[0] == '-') {
            /* "-flag": mask the feature bit out */
            add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
        } else if ((val = strchr(featurestr, '='))) {
            /* "key=value": split at '=' and handle the legacy keys that
             * need numeric fixups before falling back to QOM properties */
            *val = 0; val++;
            feat2prop(featurestr);
            if (!strcmp(featurestr, "xlevel")) {
                char *err;
                char num[32];

                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    error_setg(errp, "bad numerical value %s", val);
                    return;
                }
                if (numvalue < 0x80000000) {
                    /* Legacy fixup: xlevel was historically given as an
                     * offset from 0x80000000 */
                    error_report("xlevel value shall always be >= 0x80000000"
                                 ", fixup will be removed in future versions");
                    numvalue += 0x80000000;
                }
                snprintf(num, sizeof(num), "%" PRIu32, numvalue);
                object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
            } else if (!strcmp(featurestr, "tsc-freq")) {
                int64_t tsc_freq;
                char *err;
                char num[32];

                /* Accepts size suffixes; base unit is 1000 (Hz multiples) */
                tsc_freq = strtosz_suffix_unit(val, &err,
                                               STRTOSZ_DEFSUFFIX_B, 1000);
                if (tsc_freq < 0 || *err) {
                    error_setg(errp, "bad numerical value %s", val);
                    return;
                }
                snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
                object_property_parse(OBJECT(cpu), num, "tsc-frequency",
                                      &local_err);
            } else if (!strcmp(featurestr, "hv-spinlocks")) {
                char *err;
                const int min = 0xFFF;
                char num[32];
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    error_setg(errp, "bad numerical value %s", val);
                    return;
                }
                if (numvalue < min) {
                    /* Clamp to the Hyper-V minimum instead of failing */
                    error_report("hv-spinlocks value shall always be >= 0x%x"
                                 ", fixup will be removed in future versions",
                                 min);
                    numvalue = min;
                }
                snprintf(num, sizeof(num), "%" PRId32, numvalue);
                object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
            } else {
                object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
            }
        } else {
            /* Bare "flag": treated as a boolean property set to "on" */
            feat2prop(featurestr);
            object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
        }
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        featurestr = strtok(NULL, ",");
    }

    /* "-cpu host": start from everything the accelerator supports */
    if (cpu->host_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Apply "+flag"/"-flag" adjustments last; "-" wins over "+" */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] |= plus_features[w];
        env->features[w] &= ~minus_features[w];
    }
}
1946 /* Print all cpuid feature names in featureset
1948 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1950 int bit;
1951 bool first = true;
1953 for (bit = 0; bit < 32; bit++) {
1954 if (featureset[bit]) {
1955 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1956 first = false;
/* generate CPU information. */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    X86CPUDefinition *def;
    char buf[256];
    int i;

    /* One line per built-in CPU model: name + model-id string */
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        def = &builtin_x86_defs[i];
        snprintf(buf, sizeof(buf), "%s", def->name);
        (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
    }
#ifdef CONFIG_KVM
    (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
                   "KVM processor with all supported host features "
                   "(only available in KVM mode)");
#endif

    /* Then dump every known feature-flag name, one feature word per line */
    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];

        (*cpu_fprintf)(f, "  ");
        listflags(f, cpu_fprintf, fw->feat_names);
        (*cpu_fprintf)(f, "\n");
    }
}
/* QMP query-cpu-definitions: build a list with the name of every
 * built-in CPU model.  Caller owns the returned list. */
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    X86CPUDefinition *def;
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        CpuDefinitionInfoList *entry;
        CpuDefinitionInfo *info;

        def = &builtin_x86_defs[i];
        info = g_malloc0(sizeof(*info));
        info->name = g_strdup(def->name);

        /* Prepend; resulting list order is the reverse of the table */
        entry = g_malloc0(sizeof(*entry));
        entry->value = info;
        entry->next = cpu_list;
        cpu_list = entry;
    }

    return cpu_list;
}
/* Return the feature bits of word @w that the current accelerator can
 * support.  With @migratable_only, bits unsafe for live migration are
 * masked out.  Returns ~0 when neither KVM nor TCG is active. */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        /* Ask KVM which bits of this word's CPUID leaf it supports */
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                                    wi->cpuid_ecx,
                                                    wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        /* No accelerator to filter against: report everything */
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
/*
 * Filters CPU feature words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, cpu->migratable);
        uint32_t requested_features = env->features[w];
        /* Drop unsupported bits, but remember which ones were requested */
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            /* Warnings only when -cpu ...,check or ...,enforce was given */
            if (cpu->check_cpuid || cpu->enforce_cpuid) {
                report_unavailable_features(w, cpu->filtered_features[w]);
            }
            rv = 1;
        }
    }

    return rv;
}
2061 /* Load data from X86CPUDefinition
2063 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2065 CPUX86State *env = &cpu->env;
2066 const char *vendor;
2067 char host_vendor[CPUID_VENDOR_SZ + 1];
2068 FeatureWord w;
2070 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2071 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2072 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2073 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2074 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2075 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2076 cpu->cache_info_passthrough = def->cache_info_passthrough;
2077 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2078 for (w = 0; w < FEATURE_WORDS; w++) {
2079 env->features[w] = def->features[w];
2082 /* Special cases not set in the X86CPUDefinition structs: */
2083 if (kvm_enabled()) {
2084 FeatureWord w;
2085 for (w = 0; w < FEATURE_WORDS; w++) {
2086 env->features[w] |= kvm_default_features[w];
2087 env->features[w] &= ~kvm_default_unset_features[w];
2091 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2093 /* sysenter isn't supported in compatibility mode on AMD,
2094 * syscall isn't supported in compatibility mode on Intel.
2095 * Normally we advertise the actual CPU vendor, but you can
2096 * override this using the 'vendor' property if you want to use
2097 * KVM's sysenter/syscall emulation in compatibility mode and
2098 * when doing cross vendor migration
2100 vendor = def->vendor;
2101 if (kvm_enabled()) {
2102 uint32_t ebx = 0, ecx = 0, edx = 0;
2103 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2104 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2105 vendor = host_vendor;
2108 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2112 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2114 X86CPU *cpu = NULL;
2115 X86CPUClass *xcc;
2116 ObjectClass *oc;
2117 gchar **model_pieces;
2118 char *name, *features;
2119 Error *error = NULL;
2121 model_pieces = g_strsplit(cpu_model, ",", 2);
2122 if (!model_pieces[0]) {
2123 error_setg(&error, "Invalid/empty CPU model name");
2124 goto out;
2126 name = model_pieces[0];
2127 features = model_pieces[1];
2129 oc = x86_cpu_class_by_name(name);
2130 if (oc == NULL) {
2131 error_setg(&error, "Unable to find CPU definition: %s", name);
2132 goto out;
2134 xcc = X86_CPU_CLASS(oc);
2136 if (xcc->kvm_required && !kvm_enabled()) {
2137 error_setg(&error, "CPU model '%s' requires KVM", name);
2138 goto out;
2141 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2143 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2144 if (error) {
2145 goto out;
2148 out:
2149 if (error != NULL) {
2150 error_propagate(errp, error);
2151 if (cpu) {
2152 object_unref(OBJECT(cpu));
2153 cpu = NULL;
2156 g_strfreev(model_pieces);
2157 return cpu;
2160 X86CPU *cpu_x86_init(const char *cpu_model)
2162 Error *error = NULL;
2163 X86CPU *cpu;
2165 cpu = cpu_x86_create(cpu_model, &error);
2166 if (error) {
2167 goto out;
2170 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2172 out:
2173 if (error) {
2174 error_report_err(error);
2175 if (cpu != NULL) {
2176 object_unref(OBJECT(cpu));
2177 cpu = NULL;
2180 return cpu;
2183 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2185 X86CPUDefinition *cpudef = data;
2186 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2188 xcc->cpu_def = cpudef;
2191 static void x86_register_cpudef_type(X86CPUDefinition *def)
2193 char *typename = x86_cpu_type_name(def->name);
2194 TypeInfo ti = {
2195 .name = typename,
2196 .parent = TYPE_X86_CPU,
2197 .class_init = x86_cpu_cpudef_class_init,
2198 .class_data = def,
2201 type_register(&ti);
2202 g_free(typename);
2205 #if !defined(CONFIG_USER_ONLY)
2207 void cpu_clear_apic_feature(CPUX86State *env)
2209 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2212 #endif /* !CONFIG_USER_ONLY */
2214 /* Initialize list of CPU models, filling some non-static fields if necessary
2216 void x86_cpudef_setup(void)
2218 int i, j;
2219 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2221 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2222 X86CPUDefinition *def = &builtin_x86_defs[i];
2224 /* Look for specific "cpudef" models that */
2225 /* have the QEMU version in .model_id */
2226 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2227 if (strcmp(model_with_versions[j], def->name) == 0) {
2228 pstrcpy(def->model_id, sizeof(def->model_id),
2229 "QEMU Virtual CPU version ");
2230 pstrcat(def->model_id, sizeof(def->model_id),
2231 qemu_get_version());
2232 break;
/*
 * cpu_x86_cpuid: emulate the CPUID instruction for the guest.
 *
 * @index: requested CPUID leaf (guest EAX).
 * @count: requested sub-leaf (guest ECX), used by leaves 4, 7, 0xA, 0xD.
 * @eax/@ebx/@ecx/@edx: outputs; every path assigns all four.
 *
 * Out-of-range leaves are first clamped to the configured maximum
 * (cpuid_level / cpuid_xlevel / cpuid_xlevel2) before dispatch.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string is returned in EBX:EDX:ECX order. */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            /* Advertise the logical processor count and hyperthreading. */
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= 1 << 28;    /* HTT bit */
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        *ecx = 0;
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    /* L2 is shared between the threads of one core. */
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = 0; /* Reserved */
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xD: {
        KVMState *s = cs->kvm_state;
        uint64_t kvm_mask;
        int i;

        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
            break;
        }
        kvm_mask =
            kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
            ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);

        if (count == 0) {
            *ecx = 0x240;
            for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
                const ExtSaveArea *esa = &ext_save_areas[i];
                /* NOTE(review): "1 << i" is an int (32-bit) shift; the
                 * "i < 32" branch below suggests i may exceed 31, which
                 * would be UB here — confirm ARRAY_SIZE(ext_save_areas)
                 * stays <= 32, or widen to 1ULL << i. */
                if ((env->features[esa->feature] & esa->bits) == esa->bits &&
                    (kvm_mask & (1 << i)) != 0) {
                    if (i < 32) {
                        *eax |= 1 << i;
                    } else {
                        *edx |= 1 << (i - 32);
                    }
                    *ecx = MAX(*ecx, esa->offset + esa->size);
                }
            }
            *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(ext_save_areas)) {
            const ExtSaveArea *esa = &ext_save_areas[count];
            if ((env->features[esa->feature] & esa->bits) == esa->bits &&
                (kvm_mask & (1 << count)) != 0) {
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So dont set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Model-id string, 16 bytes per leaf across three leaves. */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        *edx = ((L3_SIZE_KB/512) << 18) | \
               (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
               (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
                *eax = 0x00000024; /* 36 bits physical */
            } else {
                *eax = 0x00000020; /* 32 bits physical */
            }
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* CPUClass::reset()
 *
 * Put the CPU into its architectural power-on/RESET state.  The CPUID
 * configuration (everything from cpuid_level onwards in CPUX86State) is
 * deliberately preserved across reset — only the fields before that
 * offset are zeroed.  The statement order below follows the hardware
 * reset sequence and should not be rearranged casually.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    xcc->parent_reset(s);

    /* Zero only up to cpuid_level: CPUID state survives reset. */
    memset(env, 0, offsetof(CPUX86State, cpuid_level));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 reset value: ET/CD/NW set, paging off (SDM reset state). */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS starts at the architectural reset vector segment. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    env->xstate_bv = XSTATE_FP | XSTATE_SSE;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    env->xcr0 = 1;

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2690 #ifndef CONFIG_USER_ONLY
2691 bool cpu_is_bsp(X86CPU *cpu)
2693 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2696 /* TODO: remove me, when reset over QOM tree is implemented */
2697 static void x86_cpu_machine_reset_cb(void *opaque)
2699 X86CPU *cpu = opaque;
2700 cpu_reset(CPU(cpu));
2702 #endif
2704 static void mce_init(X86CPU *cpu)
2706 CPUX86State *cenv = &cpu->env;
2707 unsigned int bank;
2709 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2710 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2711 (CPUID_MCE | CPUID_MCA)) {
2712 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2713 cenv->mcg_ctl = ~(uint64_t)0;
2714 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2715 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
#ifndef CONFIG_USER_ONLY
/* Create (but do not realize) the per-CPU local APIC device.
 * Picks the APIC implementation matching the accelerator in use. */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    DeviceState *dev = DEVICE(cpu);
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_irqchip_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
    if (cpu->apic_state == NULL) {
        error_setg(errp, "APIC device '%s' could not be created", apic_type);
        return;
    }

    object_property_add_child(OBJECT(cpu), "apic",
                              OBJECT(cpu->apic_state), NULL);
    qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
}

/* Realize the APIC created above; no-op when the CPU has no APIC. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);
}

/* machine-init-done hook: map SMRAM (if the board provides one) into
 * this CPU's address space, disabled until SMM entry enables it. */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
#else
/* User-mode emulation has no APIC; keep a stub so callers need no #ifdef. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif
/* Vendor checks used for vendor-specific CPUID fixups below. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)

/* DeviceClass::realize for X86CPU: final CPUID fixups, feature
 * filtering against the accelerator, APIC/MCE/address-space setup,
 * vCPU creation and initial reset.  Order matters throughout. */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (cpu->apic_id < 0) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /* CPUID leaf 7 flags require cpuid_level >= 7 to be visible. */
    if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
        env->cpuid_level = 7;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);
        cs->as = g_new(AddressSpace, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(cs->as, cpu->cpu_as_root, "CPU");

        /* ... SMRAM with higher priority, linked from /machine/smram.  */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
/* Opaque state for a boolean QOM property that controls one or more
 * bits inside a feature-word uint32_t (see x86_cpu_register_bit_prop). */
typedef struct BitProperty {
    uint32_t *ptr;  /* the word the property reads/writes */
    uint32_t mask;  /* bit(s) controlled; getter is true only if ALL set */
} BitProperty;
2893 static void x86_cpu_get_bit_prop(Object *obj,
2894 struct Visitor *v,
2895 void *opaque,
2896 const char *name,
2897 Error **errp)
2899 BitProperty *fp = opaque;
2900 bool value = (*fp->ptr & fp->mask) == fp->mask;
2901 visit_type_bool(v, &value, name, errp);
2904 static void x86_cpu_set_bit_prop(Object *obj,
2905 struct Visitor *v,
2906 void *opaque,
2907 const char *name,
2908 Error **errp)
2910 DeviceState *dev = DEVICE(obj);
2911 BitProperty *fp = opaque;
2912 Error *local_err = NULL;
2913 bool value;
2915 if (dev->realized) {
2916 qdev_prop_set_after_realize(dev, name, errp);
2917 return;
2920 visit_type_bool(v, &value, name, &local_err);
2921 if (local_err) {
2922 error_propagate(errp, local_err);
2923 return;
2926 if (value) {
2927 *fp->ptr |= fp->mask;
2928 } else {
2929 *fp->ptr &= ~fp->mask;
2933 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
2934 void *opaque)
2936 BitProperty *prop = opaque;
2937 g_free(prop);
2940 /* Register a boolean property to get/set a single bit in a uint32_t field.
2942 * The same property name can be registered multiple times to make it affect
2943 * multiple bits in the same FeatureWord. In that case, the getter will return
2944 * true only if all bits are set.
2946 static void x86_cpu_register_bit_prop(X86CPU *cpu,
2947 const char *prop_name,
2948 uint32_t *field,
2949 int bitnr)
2951 BitProperty *fp;
2952 ObjectProperty *op;
2953 uint32_t mask = (1UL << bitnr);
2955 op = object_property_find(OBJECT(cpu), prop_name, NULL);
2956 if (op) {
2957 fp = op->opaque;
2958 assert(fp->ptr == field);
2959 fp->mask |= mask;
2960 } else {
2961 fp = g_new0(BitProperty, 1);
2962 fp->ptr = field;
2963 fp->mask = mask;
2964 object_property_add(OBJECT(cpu), prop_name, "bool",
2965 x86_cpu_get_bit_prop,
2966 x86_cpu_set_bit_prop,
2967 x86_cpu_release_bit_prop, fp, &error_abort);
2971 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
2972 FeatureWord w,
2973 int bitnr)
2975 Object *obj = OBJECT(cpu);
2976 int i;
2977 char **names;
2978 FeatureWordInfo *fi = &feature_word_info[w];
2980 if (!fi->feat_names) {
2981 return;
2983 if (!fi->feat_names[bitnr]) {
2984 return;
2987 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
2989 feat2prop(names[0]);
2990 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
2992 for (i = 1; names[i]; i++) {
2993 feat2prop(names[i]);
2994 object_property_add_alias(obj, names[i], obj, names[0],
2995 &error_abort);
2998 g_strfreev(names);
/* QOM instance_init for TYPE_X86_CPU: register all per-instance
 * properties, expose every named feature bit as a bool property, and
 * load the model definition from the class.  Runs before realize. */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    static int inited;

    cs->env_ptr = env;
    cpu_exec_init(cs, &error_abort);

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "apic-id", "int",
                        x86_cpuid_get_apic_id,
                        x86_cpuid_set_apic_id, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

#ifndef CONFIG_USER_ONLY
    /* Any code creating new X86CPU objects have to set apic-id explicitly */
    cpu->apic_id = -1;
#endif

    /* Expose each named CPUID feature bit as a boolean QOM property. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);

    /* init various static tables used in TCG mode */
    if (tcg_enabled() && !inited) {
        inited = 1;
        optimize_flags_init();
    }
}
3065 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3067 X86CPU *cpu = X86_CPU(cs);
3069 return cpu->apic_id;
3072 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3074 X86CPU *cpu = X86_CPU(cs);
3076 return cpu->env.cr[0] & CR0_PG_MASK;
3079 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3081 X86CPU *cpu = X86_CPU(cs);
3083 cpu->env.eip = value;
3086 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3088 X86CPU *cpu = X86_CPU(cs);
3090 cpu->env.eip = tb->pc - tb->cs_base;
3093 static bool x86_cpu_has_work(CPUState *cs)
3095 X86CPU *cpu = X86_CPU(cs);
3096 CPUX86State *env = &cpu->env;
3098 #if !defined(CONFIG_USER_ONLY)
3099 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
3100 apic_poll_irq(cpu->apic_state);
3101 cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
3103 #endif
3105 return ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
3106 (env->eflags & IF_MASK)) ||
3107 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3108 CPU_INTERRUPT_INIT |
3109 CPU_INTERRUPT_SIPI |
3110 CPU_INTERRUPT_MCE)) ||
3111 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3112 !(env->hflags & HF_SMM_MASK));
/* qdev properties settable on an X86CPU via -cpu / -global. */
static Property x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V spinlock retry count; uses a custom parser/printer. */
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    /* check: warn about unavailable features; enforce: fail realize. */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
    DEFINE_PROP_END_OF_LIST()
};
/* Class init for the abstract TYPE_X86_CPU base type: wire up the
 * common CPUClass/DeviceClass hooks shared by every x86 CPU model. */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain realize/reset so our implementations can call the parents'. */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->bus_type = TYPE_ICC_BUS;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
}
/* Abstract base type; concrete per-model types are registered from
 * builtin_x86_defs by x86_register_cpudef_type(). */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3186 static void x86_cpu_register_types(void)
3188 int i;
3190 type_register_static(&x86_cpu_type_info);
3191 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3192 x86_register_cpudef_type(&builtin_x86_defs[i]);
3194 #ifdef CONFIG_KVM
3195 type_register_static(&host_x86_cpu_type_info);
3196 #endif
3199 type_init(x86_cpu_register_types)