target-i386: fix IvyBridge xlevel in PC_COMPAT_2_3
[qemu/cris-port.git] / target-i386 / cpu.c (blob 7a779b1653a5cd3a3c493f21db5c3121ebad277c)
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
29 #include "qemu/error-report.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/cpu/icc_bus.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "exec/address-spaces.h"
49 #include "hw/xen/xen.h"
50 #include "hw/i386/apic_internal.h"
51 #endif
54 /* Cache topology CPUID constants: */
56 /* CPUID Leaf 2 Descriptors */
58 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
59 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
60 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
63 /* CPUID Leaf 4 constants: */
65 /* EAX: */
66 #define CPUID_4_TYPE_DCACHE 1
67 #define CPUID_4_TYPE_ICACHE 2
68 #define CPUID_4_TYPE_UNIFIED 3
70 #define CPUID_4_LEVEL(l) ((l) << 5)
72 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
73 #define CPUID_4_FULLY_ASSOC (1 << 9)
75 /* EDX: */
76 #define CPUID_4_NO_INVD_SHARING (1 << 0)
77 #define CPUID_4_INCLUSIVE (1 << 1)
78 #define CPUID_4_COMPLEX_IDX (1 << 2)
80 #define ASSOC_FULL 0xFF
82 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
83 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
84 a == 2 ? 0x2 : \
85 a == 4 ? 0x4 : \
86 a == 8 ? 0x6 : \
87 a == 16 ? 0x8 : \
88 a == 32 ? 0xA : \
89 a == 48 ? 0xB : \
90 a == 64 ? 0xC : \
91 a == 96 ? 0xD : \
92 a == 128 ? 0xE : \
93 a == ASSOC_FULL ? 0xF : \
94 0 /* invalid value */)
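/* Illustrative sketch (not part of the original file): a standalone check of the
 * AMD_ENC_ASSOC mapping defined above, using the same macro text. Compile with any
 * C compiler and compare the printed encodings against AMD's CPUID specification
 * for leaf 0x80000006. */
#include <stdio.h>

#define ASSOC_FULL 0xFF
#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
                          a == 2 ? 0x2 : \
                          a == 4 ? 0x4 : \
                          a == 8 ? 0x6 : \
                          a == 16 ? 0x8 : \
                          a == 32 ? 0xA : \
                          a == 48 ? 0xB : \
                          a == 64 ? 0xC : \
                          a == 96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)

int main(void)
{
    int ways[] = { 1, 2, 4, 8, 16, 32, 48, 64, 96, 128, ASSOC_FULL, 3 };

    for (size_t i = 0; i < sizeof(ways) / sizeof(ways[0]); i++) {
        /* 3-way is not encodable and maps to the "invalid" value 0 */
        printf("%3d-way -> 0x%X\n", ways[i], (unsigned)AMD_ENC_ASSOC(ways[i]));
    }
    return 0;
}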
97 /* Definitions of the hardcoded cache entries we expose: */
99 /* L1 data cache: */
100 #define L1D_LINE_SIZE 64
101 #define L1D_ASSOCIATIVITY 8
102 #define L1D_SETS 64
103 #define L1D_PARTITIONS 1
104 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
105 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
106 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
107 #define L1D_LINES_PER_TAG 1
108 #define L1D_SIZE_KB_AMD 64
109 #define L1D_ASSOCIATIVITY_AMD 2
111 /* L1 instruction cache: */
112 #define L1I_LINE_SIZE 64
113 #define L1I_ASSOCIATIVITY 8
114 #define L1I_SETS 64
115 #define L1I_PARTITIONS 1
116 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
117 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
118 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
119 #define L1I_LINES_PER_TAG 1
120 #define L1I_SIZE_KB_AMD 64
121 #define L1I_ASSOCIATIVITY_AMD 2
123 /* Level 2 unified cache: */
124 #define L2_LINE_SIZE 64
125 #define L2_ASSOCIATIVITY 16
126 #define L2_SETS 4096
127 #define L2_PARTITIONS 1
128 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
129 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
130 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
131 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
132 #define L2_LINES_PER_TAG 1
133 #define L2_SIZE_KB_AMD 512
135 /* No L3 cache: */
136 #define L3_SIZE_KB 0 /* disabled */
137 #define L3_ASSOCIATIVITY 0 /* disabled */
138 #define L3_LINES_PER_TAG 0 /* disabled */
139 #define L3_LINE_SIZE 0 /* disabled */
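/* Illustrative sketch (not part of the original file): recomputing the cache sizes
 * implied by the geometry constants above, using the same formula as the
 * "Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS" comments. The helper name is
 * ours, not QEMU's. */
#include <stdio.h>

static unsigned cache_size_bytes(unsigned line, unsigned assoc,
                                 unsigned sets, unsigned parts)
{
    return line * assoc * sets * parts;
}

int main(void)
{
    printf("L1D: %u KiB\n", cache_size_bytes(64, 8, 64, 1) / 1024);    /* 32 KiB   */
    printf("L1I: %u KiB\n", cache_size_bytes(64, 8, 64, 1) / 1024);    /* 32 KiB   */
    printf("L2:  %u KiB\n", cache_size_bytes(64, 16, 4096, 1) / 1024); /* 4096 KiB */
    return 0;
}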
141 /* TLB definitions: */
143 #define L1_DTLB_2M_ASSOC 1
144 #define L1_DTLB_2M_ENTRIES 255
145 #define L1_DTLB_4K_ASSOC 1
146 #define L1_DTLB_4K_ENTRIES 255
148 #define L1_ITLB_2M_ASSOC 1
149 #define L1_ITLB_2M_ENTRIES 255
150 #define L1_ITLB_4K_ASSOC 1
151 #define L1_ITLB_4K_ENTRIES 255
153 #define L2_DTLB_2M_ASSOC 0 /* disabled */
154 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
155 #define L2_DTLB_4K_ASSOC 4
156 #define L2_DTLB_4K_ENTRIES 512
158 #define L2_ITLB_2M_ASSOC 0 /* disabled */
159 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
160 #define L2_ITLB_4K_ASSOC 4
161 #define L2_ITLB_4K_ENTRIES 512
165 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
166 uint32_t vendor2, uint32_t vendor3)
168 int i;
169 for (i = 0; i < 4; i++) {
170 dst[i] = vendor1 >> (8 * i);
171 dst[i + 4] = vendor2 >> (8 * i);
172 dst[i + 8] = vendor3 >> (8 * i);
174 dst[CPUID_VENDOR_SZ] = '\0';
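/* Illustrative sketch (not part of the original file): the same word-to-string
 * unpacking that x86_cpu_vendor_words2str() performs above, applied to the
 * register values an Intel CPU returns for CPUID leaf 0 (EBX, EDX, ECX), which
 * spell "GenuineIntel". The three constants are the well-known vendor words. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t ebx = 0x756e6547; /* "Genu" */
    uint32_t edx = 0x49656e69; /* "ineI" */
    uint32_t ecx = 0x6c65746e; /* "ntel" */
    char dst[13];

    for (int i = 0; i < 4; i++) {
        dst[i]     = ebx >> (8 * i);
        dst[i + 4] = edx >> (8 * i);
        dst[i + 8] = ecx >> (8 * i);
    }
    dst[12] = '\0';
    printf("%s\n", dst); /* GenuineIntel */
    return 0;
}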
177 /* feature flags taken from "Intel Processor Identification and the CPUID
178 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
179 * between feature naming conventions, aliases may be added.
181 static const char *feature_name[] = {
182 "fpu", "vme", "de", "pse",
183 "tsc", "msr", "pae", "mce",
184 "cx8", "apic", NULL, "sep",
185 "mtrr", "pge", "mca", "cmov",
186 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
187 NULL, "ds" /* Intel dts */, "acpi", "mmx",
188 "fxsr", "sse", "sse2", "ss",
189 "ht" /* Intel htt */, "tm", "ia64", "pbe",
191 static const char *ext_feature_name[] = {
192 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
193 "ds_cpl", "vmx", "smx", "est",
194 "tm2", "ssse3", "cid", NULL,
195 "fma", "cx16", "xtpr", "pdcm",
196 NULL, "pcid", "dca", "sse4.1|sse4_1",
197 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
198 "tsc-deadline", "aes", "xsave", "osxsave",
199 "avx", "f16c", "rdrand", "hypervisor",
201 /* Feature names that are already defined in feature_name[] but are set in
202 * CPUID[8000_0001].EDX on AMD CPUs don't have their names in
203 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
204 * if and only if the CPU vendor is AMD.
206 static const char *ext2_feature_name[] = {
207 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
208 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
209 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
210 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
211 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
212 "nx|xd", NULL, "mmxext", NULL /* mmx */,
213 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
214 NULL, "lm|i64", "3dnowext", "3dnow",
216 static const char *ext3_feature_name[] = {
217 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
218 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
219 "3dnowprefetch", "osvw", "ibs", "xop",
220 "skinit", "wdt", NULL, "lwp",
221 "fma4", "tce", NULL, "nodeid_msr",
222 NULL, "tbm", "topoext", "perfctr_core",
223 "perfctr_nb", NULL, NULL, NULL,
224 NULL, NULL, NULL, NULL,
227 static const char *ext4_feature_name[] = {
228 NULL, NULL, "xstore", "xstore-en",
229 NULL, NULL, "xcrypt", "xcrypt-en",
230 "ace2", "ace2-en", "phe", "phe-en",
231 "pmm", "pmm-en", NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
235 NULL, NULL, NULL, NULL,
238 static const char *kvm_feature_name[] = {
239 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
240 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 NULL, NULL, NULL, NULL,
245 "kvmclock-stable-bit", NULL, NULL, NULL,
246 NULL, NULL, NULL, NULL,
249 static const char *svm_feature_name[] = {
250 "npt", "lbrv", "svm_lock", "nrip_save",
251 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
252 NULL, NULL, "pause_filter", NULL,
253 "pfthreshold", NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
257 NULL, NULL, NULL, NULL,
260 static const char *cpuid_7_0_ebx_feature_name[] = {
261 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
262 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
263 "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
264 NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
267 static const char *cpuid_apm_edx_feature_name[] = {
268 NULL, NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 "invtsc", NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
275 NULL, NULL, NULL, NULL,
278 static const char *cpuid_xsave_feature_name[] = {
279 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
286 NULL, NULL, NULL, NULL,
289 static const char *cpuid_6_feature_name[] = {
290 NULL, NULL, "arat", NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
297 NULL, NULL, NULL, NULL,
300 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
301 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
302 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
303 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
304 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
305 CPUID_PSE36 | CPUID_FXSR)
306 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
307 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
308 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
309 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
310 CPUID_PAE | CPUID_SEP | CPUID_APIC)
312 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
313 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
314 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
315 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
316 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
317 /* partly implemented:
318 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
319 /* missing:
320 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
321 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
322 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
323 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
324 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
325 /* missing:
326 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
327 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
328 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
329 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
330 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
331 CPUID_EXT_RDRAND */
333 #ifdef TARGET_X86_64
334 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
335 #else
336 #define TCG_EXT2_X86_64_FEATURES 0
337 #endif
339 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
340 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
341 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
342 TCG_EXT2_X86_64_FEATURES)
343 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
344 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
345 #define TCG_EXT4_FEATURES 0
346 #define TCG_SVM_FEATURES 0
347 #define TCG_KVM_FEATURES 0
348 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
349 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
350 /* missing:
351 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
352 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
353 CPUID_7_0_EBX_RDSEED */
354 #define TCG_APM_FEATURES 0
355 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
358 typedef struct FeatureWordInfo {
359 const char **feat_names;
360 uint32_t cpuid_eax; /* Input EAX for CPUID */
361 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
362 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
363 int cpuid_reg; /* output register (R_* constant) */
364 uint32_t tcg_features; /* Feature flags supported by TCG */
365 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
366 } FeatureWordInfo;
368 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
369 [FEAT_1_EDX] = {
370 .feat_names = feature_name,
371 .cpuid_eax = 1, .cpuid_reg = R_EDX,
372 .tcg_features = TCG_FEATURES,
374 [FEAT_1_ECX] = {
375 .feat_names = ext_feature_name,
376 .cpuid_eax = 1, .cpuid_reg = R_ECX,
377 .tcg_features = TCG_EXT_FEATURES,
379 [FEAT_8000_0001_EDX] = {
380 .feat_names = ext2_feature_name,
381 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
382 .tcg_features = TCG_EXT2_FEATURES,
384 [FEAT_8000_0001_ECX] = {
385 .feat_names = ext3_feature_name,
386 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
387 .tcg_features = TCG_EXT3_FEATURES,
389 [FEAT_C000_0001_EDX] = {
390 .feat_names = ext4_feature_name,
391 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
392 .tcg_features = TCG_EXT4_FEATURES,
394 [FEAT_KVM] = {
395 .feat_names = kvm_feature_name,
396 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
397 .tcg_features = TCG_KVM_FEATURES,
399 [FEAT_SVM] = {
400 .feat_names = svm_feature_name,
401 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
402 .tcg_features = TCG_SVM_FEATURES,
404 [FEAT_7_0_EBX] = {
405 .feat_names = cpuid_7_0_ebx_feature_name,
406 .cpuid_eax = 7,
407 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
408 .cpuid_reg = R_EBX,
409 .tcg_features = TCG_7_0_EBX_FEATURES,
411 [FEAT_8000_0007_EDX] = {
412 .feat_names = cpuid_apm_edx_feature_name,
413 .cpuid_eax = 0x80000007,
414 .cpuid_reg = R_EDX,
415 .tcg_features = TCG_APM_FEATURES,
416 .unmigratable_flags = CPUID_APM_INVTSC,
418 [FEAT_XSAVE] = {
419 .feat_names = cpuid_xsave_feature_name,
420 .cpuid_eax = 0xd,
421 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
422 .cpuid_reg = R_EAX,
423 .tcg_features = 0,
425 [FEAT_6_EAX] = {
426 .feat_names = cpuid_6_feature_name,
427 .cpuid_eax = 6, .cpuid_reg = R_EAX,
428 .tcg_features = TCG_6_EAX_FEATURES,
432 typedef struct X86RegisterInfo32 {
433 /* Name of register */
434 const char *name;
435 /* QAPI enum value register */
436 X86CPURegister32 qapi_enum;
437 } X86RegisterInfo32;
439 #define REGISTER(reg) \
440 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
441 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
442 REGISTER(EAX),
443 REGISTER(ECX),
444 REGISTER(EDX),
445 REGISTER(EBX),
446 REGISTER(ESP),
447 REGISTER(EBP),
448 REGISTER(ESI),
449 REGISTER(EDI),
451 #undef REGISTER
453 typedef struct ExtSaveArea {
454 uint32_t feature, bits;
455 uint32_t offset, size;
456 } ExtSaveArea;
458 static const ExtSaveArea ext_save_areas[] = {
459 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
460 .offset = 0x240, .size = 0x100 },
461 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
462 .offset = 0x3c0, .size = 0x40 },
463 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
464 .offset = 0x400, .size = 0x40 },
465 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
466 .offset = 0x440, .size = 0x40 },
467 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
468 .offset = 0x480, .size = 0x200 },
469 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
470 .offset = 0x680, .size = 0x400 },
473 const char *get_register_name_32(unsigned int reg)
475 if (reg >= CPU_NB_REGS32) {
476 return NULL;
478 return x86_reg_info_32[reg].name;
481 /* KVM-specific features that are automatically added to all CPU models
482 * when KVM is enabled.
484 static uint32_t kvm_default_features[FEATURE_WORDS] = {
485 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
486 (1 << KVM_FEATURE_NOP_IO_DELAY) |
487 (1 << KVM_FEATURE_CLOCKSOURCE2) |
488 (1 << KVM_FEATURE_ASYNC_PF) |
489 (1 << KVM_FEATURE_STEAL_TIME) |
490 (1 << KVM_FEATURE_PV_EOI) |
491 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
492 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
495 /* Features that are not added by default to any CPU model when KVM is enabled.
497 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
498 [FEAT_1_EDX] = CPUID_ACPI,
499 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
500 [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
503 void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
505 kvm_default_features[w] &= ~features;
508 void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
510 kvm_default_unset_features[w] &= ~features;
514 * Returns the set of feature flags that are supported and migratable by
515 * QEMU, for a given FeatureWord.
517 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
519 FeatureWordInfo *wi = &feature_word_info[w];
520 uint32_t r = 0;
521 int i;
523 for (i = 0; i < 32; i++) {
524 uint32_t f = 1U << i;
525 /* If the feature name is unknown, it is not supported by QEMU yet */
526 if (!wi->feat_names[i]) {
527 continue;
529 /* Skip features known to QEMU, but explicitly marked as unmigratable */
530 if (wi->unmigratable_flags & f) {
531 continue;
533 r |= f;
535 return r;
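/* Illustrative sketch (not part of the original file): the same filtering rule
 * as x86_cpu_get_migratable_flags() above, applied to a toy 4-bit feature word
 * in which bit 2 has no name and bit 3 is marked unmigratable. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const char *names[4] = { "feat-a", "feat-b", NULL, "feat-d" };
    uint32_t unmigratable = 1u << 3;
    uint32_t r = 0;

    for (int i = 0; i < 4; i++) {
        uint32_t f = 1u << i;
        if (!names[i] || (unmigratable & f)) {
            continue; /* unknown to QEMU, or known but unmigratable */
        }
        r |= f;
    }
    printf("migratable mask: 0x%x\n", r); /* 0x3 */
    return 0;
}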
538 void host_cpuid(uint32_t function, uint32_t count,
539 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
541 uint32_t vec[4];
543 #ifdef __x86_64__
544 asm volatile("cpuid"
545 : "=a"(vec[0]), "=b"(vec[1]),
546 "=c"(vec[2]), "=d"(vec[3])
547 : "0"(function), "c"(count) : "cc");
548 #elif defined(__i386__)
549 asm volatile("pusha \n\t"
550 "cpuid \n\t"
551 "mov %%eax, 0(%2) \n\t"
552 "mov %%ebx, 4(%2) \n\t"
553 "mov %%ecx, 8(%2) \n\t"
554 "mov %%edx, 12(%2) \n\t"
555 "popa"
556 : : "a"(function), "c"(count), "S"(vec)
557 : "memory", "cc");
558 #else
559 abort();
560 #endif
562 if (eax)
563 *eax = vec[0];
564 if (ebx)
565 *ebx = vec[1];
566 if (ecx)
567 *ecx = vec[2];
568 if (edx)
569 *edx = vec[3];
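/* Illustrative sketch (not part of the original file): a minimal standalone use
 * of the same inline-asm pattern as host_cpuid() above, reading the host vendor
 * string from CPUID leaf 0. Assumes an x86-64 build with GCC or Clang. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t eax, ebx, ecx, edx;
    uint32_t function = 0, count = 0;

    asm volatile("cpuid"
                 : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                 : "0"(function), "c"(count) : "cc");

    char vendor[13];
    for (int i = 0; i < 4; i++) {
        vendor[i]     = ebx >> (8 * i);
        vendor[i + 4] = edx >> (8 * i);
        vendor[i + 8] = ecx >> (8 * i);
    }
    vendor[12] = '\0';
    printf("max basic leaf: %u, vendor: %s\n", eax, vendor);
    return 0;
}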
572 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
574 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
575 * a substring. ex if !NULL points to the first char after a substring,
576 * otherwise the string is assumed to be sized by a terminating nul.
577 * Return lexical ordering of *s1:*s2.
579 static int sstrcmp(const char *s1, const char *e1,
580 const char *s2, const char *e2)
582 for (;;) {
583 if (!*s1 || !*s2 || *s1 != *s2)
584 return (*s1 - *s2);
585 ++s1, ++s2;
586 if (s1 == e1 && s2 == e2)
587 return (0);
588 else if (s1 == e1)
589 return (*s2);
590 else if (s2 == e2)
591 return (*s1);
595 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
596 * '|' delimited (possibly empty) strings in which case search for a match
597 * within the alternatives proceeds left to right. Return 0 for success,
598 * non-zero otherwise.
600 static int altcmp(const char *s, const char *e, const char *altstr)
602 const char *p, *q;
604 for (q = p = altstr; ; ) {
605 while (*p && *p != '|')
606 ++p;
607 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
608 return (0);
609 if (!*p)
610 return (1);
611 else
612 q = ++p;
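/* Illustrative sketch (not part of the original file): a simplified standalone
 * version of the alias matching that altcmp() performs for '|'-delimited feature
 * names such as "sse4.1|sse4_1". Like altcmp(), it returns 0 on a match and
 * non-zero otherwise; it uses plain libc string functions instead of sstrcmp(). */
#include <stdio.h>
#include <string.h>

static int alias_match(const char *name, const char *altstr)
{
    const char *p = altstr;

    while (1) {
        const char *bar = strchr(p, '|');
        size_t len = bar ? (size_t)(bar - p) : strlen(p);

        if (strlen(name) == len && strncmp(name, p, len) == 0) {
            return 0; /* matched one alternative */
        }
        if (!bar) {
            return 1; /* no alternative matched */
        }
        p = bar + 1;
    }
}

int main(void)
{
    printf("%d\n", alias_match("sse4_1", "sse4.1|sse4_1")); /* 0 */
    printf("%d\n", alias_match("sse4.1", "sse4.1|sse4_1")); /* 0 */
    printf("%d\n", alias_match("sse3",   "sse4.1|sse4_1")); /* 1 */
    return 0;
}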
616 /* search featureset for flag *[s..e), if found set corresponding bit in
617 * *pval and return true, otherwise return false
619 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
620 const char **featureset)
622 uint32_t mask;
623 const char **ppc;
624 bool found = false;
626 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
627 if (*ppc && !altcmp(s, e, *ppc)) {
628 *pval |= mask;
629 found = true;
632 return found;
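/* Illustrative sketch (not part of the original file): how lookup_feature() above
 * derives a bit mask from the position of a name in a 32-entry feature-name table.
 * A toy table and plain strcmp() stand in for the real tables and altcmp(). */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    const char *featureset[32] = {
        [0] = "fpu", [4] = "tsc", [25] = "sse", [26] = "sse2",
    };
    const char *wanted = "sse2";
    uint32_t pval = 0;
    uint32_t mask = 1;

    for (const char **ppc = featureset; mask; mask <<= 1, ++ppc) {
        if (*ppc && strcmp(wanted, *ppc) == 0) {
            pval |= mask; /* table index becomes the CPUID bit position */
        }
    }
    printf("0x%08x\n", pval); /* 0x04000000 (bit 26) */
    return 0;
}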
635 static void add_flagname_to_bitmaps(const char *flagname,
636 FeatureWordArray words,
637 Error **errp)
639 FeatureWord w;
640 for (w = 0; w < FEATURE_WORDS; w++) {
641 FeatureWordInfo *wi = &feature_word_info[w];
642 if (wi->feat_names &&
643 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
644 break;
647 if (w == FEATURE_WORDS) {
648 error_setg(errp, "CPU feature %s not found", flagname);
652 /* CPU class name definitions: */
654 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
655 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
657 /* Return type name for a given CPU model name
658 * Caller is responsible for freeing the returned string.
660 static char *x86_cpu_type_name(const char *model_name)
662 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
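/* Illustrative sketch (not part of the original file): the suffixing done by
 * x86_cpu_type_name() above, using plain snprintf instead of glib. The suffix
 * string "x86_64-cpu" is an assumption here (the value TYPE_X86_CPU typically
 * has on 64-bit targets). */
#include <stdio.h>

#define TYPE_X86_CPU_EXAMPLE "x86_64-cpu"                  /* assumed value */
#define X86_CPU_TYPE_SUFFIX_EXAMPLE "-" TYPE_X86_CPU_EXAMPLE

int main(void)
{
    char typename[64];

    snprintf(typename, sizeof(typename), "%s%s", "Haswell",
             X86_CPU_TYPE_SUFFIX_EXAMPLE);
    printf("%s\n", typename); /* Haswell-x86_64-cpu */
    return 0;
}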
665 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
667 ObjectClass *oc;
668 char *typename;
670 if (cpu_model == NULL) {
671 return NULL;
674 typename = x86_cpu_type_name(cpu_model);
675 oc = object_class_by_name(typename);
676 g_free(typename);
677 return oc;
680 struct X86CPUDefinition {
681 const char *name;
682 uint32_t level;
683 uint32_t xlevel;
684 uint32_t xlevel2;
685 /* vendor is zero-terminated, 12 character ASCII string */
686 char vendor[CPUID_VENDOR_SZ + 1];
687 int family;
688 int model;
689 int stepping;
690 FeatureWordArray features;
691 char model_id[48];
692 bool cache_info_passthrough;
695 static X86CPUDefinition builtin_x86_defs[] = {
697 .name = "qemu64",
698 .level = 0xd,
699 .vendor = CPUID_VENDOR_AMD,
700 .family = 6,
701 .model = 6,
702 .stepping = 3,
703 .features[FEAT_1_EDX] =
704 PPRO_FEATURES |
705 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
706 CPUID_PSE36,
707 .features[FEAT_1_ECX] =
708 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
709 .features[FEAT_8000_0001_EDX] =
710 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
711 .features[FEAT_8000_0001_ECX] =
712 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
713 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
714 .xlevel = 0x8000000A,
717 .name = "phenom",
718 .level = 5,
719 .vendor = CPUID_VENDOR_AMD,
720 .family = 16,
721 .model = 2,
722 .stepping = 3,
723 /* Missing: CPUID_HT */
724 .features[FEAT_1_EDX] =
725 PPRO_FEATURES |
726 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
727 CPUID_PSE36 | CPUID_VME,
728 .features[FEAT_1_ECX] =
729 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
730 CPUID_EXT_POPCNT,
731 .features[FEAT_8000_0001_EDX] =
732 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
733 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
734 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
735 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
736 CPUID_EXT3_CR8LEG,
737 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
738 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
739 .features[FEAT_8000_0001_ECX] =
740 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
741 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
742 /* Missing: CPUID_SVM_LBRV */
743 .features[FEAT_SVM] =
744 CPUID_SVM_NPT,
745 .xlevel = 0x8000001A,
746 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
749 .name = "core2duo",
750 .level = 10,
751 .vendor = CPUID_VENDOR_INTEL,
752 .family = 6,
753 .model = 15,
754 .stepping = 11,
755 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
756 .features[FEAT_1_EDX] =
757 PPRO_FEATURES |
758 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
759 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
760 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
761 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
762 .features[FEAT_1_ECX] =
763 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
764 CPUID_EXT_CX16,
765 .features[FEAT_8000_0001_EDX] =
766 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
767 .features[FEAT_8000_0001_ECX] =
768 CPUID_EXT3_LAHF_LM,
769 .xlevel = 0x80000008,
770 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
773 .name = "kvm64",
774 .level = 0xd,
775 .vendor = CPUID_VENDOR_INTEL,
776 .family = 15,
777 .model = 6,
778 .stepping = 1,
779 /* Missing: CPUID_HT */
780 .features[FEAT_1_EDX] =
781 PPRO_FEATURES | CPUID_VME |
782 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
783 CPUID_PSE36,
784 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
785 .features[FEAT_1_ECX] =
786 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
787 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
788 .features[FEAT_8000_0001_EDX] =
789 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
790 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
791 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
792 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
793 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
794 .features[FEAT_8000_0001_ECX] =
796 .xlevel = 0x80000008,
797 .model_id = "Common KVM processor"
800 .name = "qemu32",
801 .level = 4,
802 .vendor = CPUID_VENDOR_INTEL,
803 .family = 6,
804 .model = 6,
805 .stepping = 3,
806 .features[FEAT_1_EDX] =
807 PPRO_FEATURES,
808 .features[FEAT_1_ECX] =
809 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
810 .xlevel = 0x80000004,
813 .name = "kvm32",
814 .level = 5,
815 .vendor = CPUID_VENDOR_INTEL,
816 .family = 15,
817 .model = 6,
818 .stepping = 1,
819 .features[FEAT_1_EDX] =
820 PPRO_FEATURES | CPUID_VME |
821 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
822 .features[FEAT_1_ECX] =
823 CPUID_EXT_SSE3,
824 .features[FEAT_8000_0001_ECX] =
826 .xlevel = 0x80000008,
827 .model_id = "Common 32-bit KVM processor"
830 .name = "coreduo",
831 .level = 10,
832 .vendor = CPUID_VENDOR_INTEL,
833 .family = 6,
834 .model = 14,
835 .stepping = 8,
836 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
837 .features[FEAT_1_EDX] =
838 PPRO_FEATURES | CPUID_VME |
839 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
840 CPUID_SS,
841 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
842 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
843 .features[FEAT_1_ECX] =
844 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
845 .features[FEAT_8000_0001_EDX] =
846 CPUID_EXT2_NX,
847 .xlevel = 0x80000008,
848 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
851 .name = "486",
852 .level = 1,
853 .vendor = CPUID_VENDOR_INTEL,
854 .family = 4,
855 .model = 8,
856 .stepping = 0,
857 .features[FEAT_1_EDX] =
858 I486_FEATURES,
859 .xlevel = 0,
862 .name = "pentium",
863 .level = 1,
864 .vendor = CPUID_VENDOR_INTEL,
865 .family = 5,
866 .model = 4,
867 .stepping = 3,
868 .features[FEAT_1_EDX] =
869 PENTIUM_FEATURES,
870 .xlevel = 0,
873 .name = "pentium2",
874 .level = 2,
875 .vendor = CPUID_VENDOR_INTEL,
876 .family = 6,
877 .model = 5,
878 .stepping = 2,
879 .features[FEAT_1_EDX] =
880 PENTIUM2_FEATURES,
881 .xlevel = 0,
884 .name = "pentium3",
885 .level = 3,
886 .vendor = CPUID_VENDOR_INTEL,
887 .family = 6,
888 .model = 7,
889 .stepping = 3,
890 .features[FEAT_1_EDX] =
891 PENTIUM3_FEATURES,
892 .xlevel = 0,
895 .name = "athlon",
896 .level = 2,
897 .vendor = CPUID_VENDOR_AMD,
898 .family = 6,
899 .model = 2,
900 .stepping = 3,
901 .features[FEAT_1_EDX] =
902 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
903 CPUID_MCA,
904 .features[FEAT_8000_0001_EDX] =
905 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
906 .xlevel = 0x80000008,
909 .name = "n270",
910 .level = 10,
911 .vendor = CPUID_VENDOR_INTEL,
912 .family = 6,
913 .model = 28,
914 .stepping = 2,
915 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
916 .features[FEAT_1_EDX] =
917 PPRO_FEATURES |
918 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
919 CPUID_ACPI | CPUID_SS,
920 /* Some CPUs have no CPUID_SEP */
921 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
922 * CPUID_EXT_XTPR */
923 .features[FEAT_1_ECX] =
924 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
925 CPUID_EXT_MOVBE,
926 .features[FEAT_8000_0001_EDX] =
927 CPUID_EXT2_NX,
928 .features[FEAT_8000_0001_ECX] =
929 CPUID_EXT3_LAHF_LM,
930 .xlevel = 0x80000008,
931 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
934 .name = "Conroe",
935 .level = 10,
936 .vendor = CPUID_VENDOR_INTEL,
937 .family = 6,
938 .model = 15,
939 .stepping = 3,
940 .features[FEAT_1_EDX] =
941 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
942 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
943 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
944 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
945 CPUID_DE | CPUID_FP87,
946 .features[FEAT_1_ECX] =
947 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
948 .features[FEAT_8000_0001_EDX] =
949 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
950 .features[FEAT_8000_0001_ECX] =
951 CPUID_EXT3_LAHF_LM,
952 .xlevel = 0x80000008,
953 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
956 .name = "Penryn",
957 .level = 10,
958 .vendor = CPUID_VENDOR_INTEL,
959 .family = 6,
960 .model = 23,
961 .stepping = 3,
962 .features[FEAT_1_EDX] =
963 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
964 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
965 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
966 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
967 CPUID_DE | CPUID_FP87,
968 .features[FEAT_1_ECX] =
969 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
970 CPUID_EXT_SSE3,
971 .features[FEAT_8000_0001_EDX] =
972 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
973 .features[FEAT_8000_0001_ECX] =
974 CPUID_EXT3_LAHF_LM,
975 .xlevel = 0x80000008,
976 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
979 .name = "Nehalem",
980 .level = 11,
981 .vendor = CPUID_VENDOR_INTEL,
982 .family = 6,
983 .model = 26,
984 .stepping = 3,
985 .features[FEAT_1_EDX] =
986 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
987 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
988 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
989 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
990 CPUID_DE | CPUID_FP87,
991 .features[FEAT_1_ECX] =
992 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
993 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
994 .features[FEAT_8000_0001_EDX] =
995 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
996 .features[FEAT_8000_0001_ECX] =
997 CPUID_EXT3_LAHF_LM,
998 .xlevel = 0x80000008,
999 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1002 .name = "Westmere",
1003 .level = 11,
1004 .vendor = CPUID_VENDOR_INTEL,
1005 .family = 6,
1006 .model = 44,
1007 .stepping = 1,
1008 .features[FEAT_1_EDX] =
1009 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1010 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1011 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1012 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1013 CPUID_DE | CPUID_FP87,
1014 .features[FEAT_1_ECX] =
1015 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1016 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1017 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1018 .features[FEAT_8000_0001_EDX] =
1019 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1020 .features[FEAT_8000_0001_ECX] =
1021 CPUID_EXT3_LAHF_LM,
1022 .features[FEAT_6_EAX] =
1023 CPUID_6_EAX_ARAT,
1024 .xlevel = 0x80000008,
1025 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1028 .name = "SandyBridge",
1029 .level = 0xd,
1030 .vendor = CPUID_VENDOR_INTEL,
1031 .family = 6,
1032 .model = 42,
1033 .stepping = 1,
1034 .features[FEAT_1_EDX] =
1035 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1036 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1037 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1038 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1039 CPUID_DE | CPUID_FP87,
1040 .features[FEAT_1_ECX] =
1041 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1042 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1043 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1044 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1045 CPUID_EXT_SSE3,
1046 .features[FEAT_8000_0001_EDX] =
1047 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1048 CPUID_EXT2_SYSCALL,
1049 .features[FEAT_8000_0001_ECX] =
1050 CPUID_EXT3_LAHF_LM,
1051 .features[FEAT_XSAVE] =
1052 CPUID_XSAVE_XSAVEOPT,
1053 .features[FEAT_6_EAX] =
1054 CPUID_6_EAX_ARAT,
1055 .xlevel = 0x80000008,
1056 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1059 .name = "IvyBridge",
1060 .level = 0xd,
1061 .vendor = CPUID_VENDOR_INTEL,
1062 .family = 6,
1063 .model = 58,
1064 .stepping = 9,
1065 .features[FEAT_1_EDX] =
1066 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1067 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1068 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1069 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1070 CPUID_DE | CPUID_FP87,
1071 .features[FEAT_1_ECX] =
1072 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1073 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1074 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1075 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1076 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1077 .features[FEAT_7_0_EBX] =
1078 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1079 CPUID_7_0_EBX_ERMS,
1080 .features[FEAT_8000_0001_EDX] =
1081 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1082 CPUID_EXT2_SYSCALL,
1083 .features[FEAT_8000_0001_ECX] =
1084 CPUID_EXT3_LAHF_LM,
1085 .features[FEAT_XSAVE] =
1086 CPUID_XSAVE_XSAVEOPT,
1087 .features[FEAT_6_EAX] =
1088 CPUID_6_EAX_ARAT,
1089 .xlevel = 0x80000008,
1090 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1093 .name = "Haswell-noTSX",
1094 .level = 0xd,
1095 .vendor = CPUID_VENDOR_INTEL,
1096 .family = 6,
1097 .model = 60,
1098 .stepping = 1,
1099 .features[FEAT_1_EDX] =
1100 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1101 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1102 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1103 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1104 CPUID_DE | CPUID_FP87,
1105 .features[FEAT_1_ECX] =
1106 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1107 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1108 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1109 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1110 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1111 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1112 .features[FEAT_8000_0001_EDX] =
1113 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1114 CPUID_EXT2_SYSCALL,
1115 .features[FEAT_8000_0001_ECX] =
1116 CPUID_EXT3_LAHF_LM,
1117 .features[FEAT_7_0_EBX] =
1118 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1119 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1120 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1121 .features[FEAT_XSAVE] =
1122 CPUID_XSAVE_XSAVEOPT,
1123 .features[FEAT_6_EAX] =
1124 CPUID_6_EAX_ARAT,
1125 .xlevel = 0x80000008,
1126 .model_id = "Intel Core Processor (Haswell, no TSX)",
1127 }, {
1128 .name = "Haswell",
1129 .level = 0xd,
1130 .vendor = CPUID_VENDOR_INTEL,
1131 .family = 6,
1132 .model = 60,
1133 .stepping = 1,
1134 .features[FEAT_1_EDX] =
1135 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1136 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1137 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1138 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1139 CPUID_DE | CPUID_FP87,
1140 .features[FEAT_1_ECX] =
1141 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1142 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1143 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1144 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1145 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1146 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1147 .features[FEAT_8000_0001_EDX] =
1148 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1149 CPUID_EXT2_SYSCALL,
1150 .features[FEAT_8000_0001_ECX] =
1151 CPUID_EXT3_LAHF_LM,
1152 .features[FEAT_7_0_EBX] =
1153 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1154 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1155 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1156 CPUID_7_0_EBX_RTM,
1157 .features[FEAT_XSAVE] =
1158 CPUID_XSAVE_XSAVEOPT,
1159 .features[FEAT_6_EAX] =
1160 CPUID_6_EAX_ARAT,
1161 .xlevel = 0x80000008,
1162 .model_id = "Intel Core Processor (Haswell)",
1165 .name = "Broadwell-noTSX",
1166 .level = 0xd,
1167 .vendor = CPUID_VENDOR_INTEL,
1168 .family = 6,
1169 .model = 61,
1170 .stepping = 2,
1171 .features[FEAT_1_EDX] =
1172 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1173 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1174 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1175 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1176 CPUID_DE | CPUID_FP87,
1177 .features[FEAT_1_ECX] =
1178 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1179 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1180 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1181 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1182 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1183 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1184 .features[FEAT_8000_0001_EDX] =
1185 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1186 CPUID_EXT2_SYSCALL,
1187 .features[FEAT_8000_0001_ECX] =
1188 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1189 .features[FEAT_7_0_EBX] =
1190 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1191 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1192 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1193 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1194 CPUID_7_0_EBX_SMAP,
1195 .features[FEAT_XSAVE] =
1196 CPUID_XSAVE_XSAVEOPT,
1197 .features[FEAT_6_EAX] =
1198 CPUID_6_EAX_ARAT,
1199 .xlevel = 0x80000008,
1200 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1203 .name = "Broadwell",
1204 .level = 0xd,
1205 .vendor = CPUID_VENDOR_INTEL,
1206 .family = 6,
1207 .model = 61,
1208 .stepping = 2,
1209 .features[FEAT_1_EDX] =
1210 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1211 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1212 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1213 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1214 CPUID_DE | CPUID_FP87,
1215 .features[FEAT_1_ECX] =
1216 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1217 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1218 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1219 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1220 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1221 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1222 .features[FEAT_8000_0001_EDX] =
1223 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1224 CPUID_EXT2_SYSCALL,
1225 .features[FEAT_8000_0001_ECX] =
1226 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1227 .features[FEAT_7_0_EBX] =
1228 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1229 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1230 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1231 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1232 CPUID_7_0_EBX_SMAP,
1233 .features[FEAT_XSAVE] =
1234 CPUID_XSAVE_XSAVEOPT,
1235 .features[FEAT_6_EAX] =
1236 CPUID_6_EAX_ARAT,
1237 .xlevel = 0x80000008,
1238 .model_id = "Intel Core Processor (Broadwell)",
1241 .name = "Opteron_G1",
1242 .level = 5,
1243 .vendor = CPUID_VENDOR_AMD,
1244 .family = 15,
1245 .model = 6,
1246 .stepping = 1,
1247 .features[FEAT_1_EDX] =
1248 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1249 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1250 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1251 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1252 CPUID_DE | CPUID_FP87,
1253 .features[FEAT_1_ECX] =
1254 CPUID_EXT_SSE3,
1255 .features[FEAT_8000_0001_EDX] =
1256 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1257 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1258 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1259 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1260 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1261 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1262 .xlevel = 0x80000008,
1263 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1266 .name = "Opteron_G2",
1267 .level = 5,
1268 .vendor = CPUID_VENDOR_AMD,
1269 .family = 15,
1270 .model = 6,
1271 .stepping = 1,
1272 .features[FEAT_1_EDX] =
1273 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1274 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1275 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1276 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1277 CPUID_DE | CPUID_FP87,
1278 .features[FEAT_1_ECX] =
1279 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1280 .features[FEAT_8000_0001_EDX] =
1281 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1282 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1283 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1284 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1285 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1286 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1287 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1288 .features[FEAT_8000_0001_ECX] =
1289 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1290 .xlevel = 0x80000008,
1291 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1294 .name = "Opteron_G3",
1295 .level = 5,
1296 .vendor = CPUID_VENDOR_AMD,
1297 .family = 15,
1298 .model = 6,
1299 .stepping = 1,
1300 .features[FEAT_1_EDX] =
1301 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1302 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1303 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1304 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1305 CPUID_DE | CPUID_FP87,
1306 .features[FEAT_1_ECX] =
1307 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1308 CPUID_EXT_SSE3,
1309 .features[FEAT_8000_0001_EDX] =
1310 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1311 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1312 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1313 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1314 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1315 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1316 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1317 .features[FEAT_8000_0001_ECX] =
1318 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1319 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1320 .xlevel = 0x80000008,
1321 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1324 .name = "Opteron_G4",
1325 .level = 0xd,
1326 .vendor = CPUID_VENDOR_AMD,
1327 .family = 21,
1328 .model = 1,
1329 .stepping = 2,
1330 .features[FEAT_1_EDX] =
1331 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1332 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1333 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1334 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1335 CPUID_DE | CPUID_FP87,
1336 .features[FEAT_1_ECX] =
1337 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1338 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1339 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1340 CPUID_EXT_SSE3,
1341 .features[FEAT_8000_0001_EDX] =
1342 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1343 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1344 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1345 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1346 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1347 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1348 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1349 .features[FEAT_8000_0001_ECX] =
1350 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1351 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1352 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1353 CPUID_EXT3_LAHF_LM,
1354 /* no xsaveopt! */
1355 .xlevel = 0x8000001A,
1356 .model_id = "AMD Opteron 62xx class CPU",
1359 .name = "Opteron_G5",
1360 .level = 0xd,
1361 .vendor = CPUID_VENDOR_AMD,
1362 .family = 21,
1363 .model = 2,
1364 .stepping = 0,
1365 .features[FEAT_1_EDX] =
1366 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1367 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1368 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1369 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1370 CPUID_DE | CPUID_FP87,
1371 .features[FEAT_1_ECX] =
1372 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1373 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1374 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1375 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1376 .features[FEAT_8000_0001_EDX] =
1377 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1378 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1379 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1380 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1381 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1382 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1383 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1384 .features[FEAT_8000_0001_ECX] =
1385 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1386 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1387 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1388 CPUID_EXT3_LAHF_LM,
1389 /* no xsaveopt! */
1390 .xlevel = 0x8000001A,
1391 .model_id = "AMD Opteron 63xx class CPU",
1396 * x86_cpu_compat_set_features:
1397 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1398 * @w: Identifies the feature word to be changed.
1399 * @feat_add: Feature bits to be added to feature word
1400 * @feat_remove: Feature bits to be removed from feature word
1402 * Change CPU model feature bits for compatibility.
1404 * This function may be used by machine-type compatibility functions
1405 * to enable or disable feature bits on specific CPU models.
1407 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1408 uint32_t feat_add, uint32_t feat_remove)
1410 X86CPUDefinition *def;
1411 int i;
1412 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1413 def = &builtin_x86_defs[i];
1414 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1415 def->features[w] |= feat_add;
1416 def->features[w] &= ~feat_remove;
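/* Illustrative sketch (not part of the original file): the add/remove masking that
 * x86_cpu_compat_set_features() applies to a model's feature word, shown on a plain
 * uint32_t. A machine-type compat hook would pass a FEAT_* word index and CPUID_*
 * bit constants rather than the toy values used here. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t features = 0x00000029;   /* current feature word */
    uint32_t feat_add = 1u << 4;      /* bit to force on      */
    uint32_t feat_remove = 1u << 0;   /* bit to force off     */

    features |= feat_add;
    features &= ~feat_remove;
    printf("0x%08x\n", features);     /* 0x00000038 */
    return 0;
}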
1421 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1422 bool migratable_only);
1424 #ifdef CONFIG_KVM
1426 static int cpu_x86_fill_model_id(char *str)
1428 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1429 int i;
1431 for (i = 0; i < 3; i++) {
1432 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1433 memcpy(str + i * 16 + 0, &eax, 4);
1434 memcpy(str + i * 16 + 4, &ebx, 4);
1435 memcpy(str + i * 16 + 8, &ecx, 4);
1436 memcpy(str + i * 16 + 12, &edx, 4);
1438 return 0;
1441 static X86CPUDefinition host_cpudef;
1443 static Property host_x86_cpu_properties[] = {
1444 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1445 DEFINE_PROP_END_OF_LIST()
1448 /* class_init for the "host" CPU model
1450 * This function may be called before KVM is initialized.
1452 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1454 DeviceClass *dc = DEVICE_CLASS(oc);
1455 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1456 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1458 xcc->kvm_required = true;
1460 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1461 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1463 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1464 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1465 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1466 host_cpudef.stepping = eax & 0x0F;
1468 cpu_x86_fill_model_id(host_cpudef.model_id);
1470 xcc->cpu_def = &host_cpudef;
1471 host_cpudef.cache_info_passthrough = true;
1473 /* level, xlevel, xlevel2, and the feature words are initialized on
1474 * instance_init, because they require KVM to be initialized.
1477 dc->props = host_x86_cpu_properties;
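/* Illustrative sketch (not part of the original file): the family/model/stepping
 * decoding used in host_x86_cpu_class_init() above, applied to EAX = 0x000306A9,
 * a CPUID leaf 1 signature commonly reported by Ivy Bridge parts. The result
 * matches the IvyBridge entry in builtin_x86_defs (family 6, model 58, stepping 9). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t eax = 0x000306A9;
    unsigned family   = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    unsigned model    = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    unsigned stepping = eax & 0x0F;

    printf("family %u, model %u, stepping %u\n", family, model, stepping);
    return 0;
}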
1480 static void host_x86_cpu_initfn(Object *obj)
1482 X86CPU *cpu = X86_CPU(obj);
1483 CPUX86State *env = &cpu->env;
1484 KVMState *s = kvm_state;
1486 assert(kvm_enabled());
1488 /* We can't fill the features array here because we don't know yet if
1489 * "migratable" is true or false.
1491 cpu->host_features = true;
1493 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1494 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1495 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1497 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1500 static const TypeInfo host_x86_cpu_type_info = {
1501 .name = X86_CPU_TYPE_NAME("host"),
1502 .parent = TYPE_X86_CPU,
1503 .instance_init = host_x86_cpu_initfn,
1504 .class_init = host_x86_cpu_class_init,
1507 #endif
1509 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1511 FeatureWordInfo *f = &feature_word_info[w];
1512 int i;
1514 for (i = 0; i < 32; ++i) {
1515 if (1 << i & mask) {
1516 const char *reg = get_register_name_32(f->cpuid_reg);
1517 assert(reg);
1518 fprintf(stderr, "warning: %s doesn't support requested feature: "
1519 "CPUID.%02XH:%s%s%s [bit %d]\n",
1520 kvm_enabled() ? "host" : "TCG",
1521 f->cpuid_eax, reg,
1522 f->feat_names[i] ? "." : "",
1523 f->feat_names[i] ? f->feat_names[i] : "", i);
1528 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1529 const char *name, Error **errp)
1531 X86CPU *cpu = X86_CPU(obj);
1532 CPUX86State *env = &cpu->env;
1533 int64_t value;
1535 value = (env->cpuid_version >> 8) & 0xf;
1536 if (value == 0xf) {
1537 value += (env->cpuid_version >> 20) & 0xff;
1539 visit_type_int(v, &value, name, errp);
1542 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1543 const char *name, Error **errp)
1545 X86CPU *cpu = X86_CPU(obj);
1546 CPUX86State *env = &cpu->env;
1547 const int64_t min = 0;
1548 const int64_t max = 0xff + 0xf;
1549 Error *local_err = NULL;
1550 int64_t value;
1552 visit_type_int(v, &value, name, &local_err);
1553 if (local_err) {
1554 error_propagate(errp, local_err);
1555 return;
1557 if (value < min || value > max) {
1558 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1559 name ? name : "null", value, min, max);
1560 return;
1563 env->cpuid_version &= ~0xff00f00;
1564 if (value > 0x0f) {
1565 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1566 } else {
1567 env->cpuid_version |= value << 8;
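/* Illustrative sketch (not part of the original file): round-tripping a family
 * value through the encode/decode logic of the "family" property above, for
 * family 21 (the value used by the Opteron_G4/G5 definitions). Families above
 * 15 spill into the extended-family field at bits 20..27 of cpuid_version. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int64_t  value = 21;
    uint32_t cpuid_version = 0;

    /* encode, as in x86_cpuid_version_set_family() */
    cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        cpuid_version |= value << 8;
    }

    /* decode, as in x86_cpuid_version_get_family() */
    int64_t decoded = (cpuid_version >> 8) & 0xf;
    if (decoded == 0xf) {
        decoded += (cpuid_version >> 20) & 0xff;
    }

    printf("encoded 0x%08x, decoded family %lld\n",
           cpuid_version, (long long)decoded); /* 0x00600f00, 21 */
    return 0;
}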
1571 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1572 const char *name, Error **errp)
1574 X86CPU *cpu = X86_CPU(obj);
1575 CPUX86State *env = &cpu->env;
1576 int64_t value;
1578 value = (env->cpuid_version >> 4) & 0xf;
1579 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1580 visit_type_int(v, &value, name, errp);
1583 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1584 const char *name, Error **errp)
1586 X86CPU *cpu = X86_CPU(obj);
1587 CPUX86State *env = &cpu->env;
1588 const int64_t min = 0;
1589 const int64_t max = 0xff;
1590 Error *local_err = NULL;
1591 int64_t value;
1593 visit_type_int(v, &value, name, &local_err);
1594 if (local_err) {
1595 error_propagate(errp, local_err);
1596 return;
1598 if (value < min || value > max) {
1599 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1600 name ? name : "null", value, min, max);
1601 return;
1604 env->cpuid_version &= ~0xf00f0;
1605 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1608 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1609 void *opaque, const char *name,
1610 Error **errp)
1612 X86CPU *cpu = X86_CPU(obj);
1613 CPUX86State *env = &cpu->env;
1614 int64_t value;
1616 value = env->cpuid_version & 0xf;
1617 visit_type_int(v, &value, name, errp);
1620 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1621 void *opaque, const char *name,
1622 Error **errp)
1624 X86CPU *cpu = X86_CPU(obj);
1625 CPUX86State *env = &cpu->env;
1626 const int64_t min = 0;
1627 const int64_t max = 0xf;
1628 Error *local_err = NULL;
1629 int64_t value;
1631 visit_type_int(v, &value, name, &local_err);
1632 if (local_err) {
1633 error_propagate(errp, local_err);
1634 return;
1636 if (value < min || value > max) {
1637 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1638 name ? name : "null", value, min, max);
1639 return;
1642 env->cpuid_version &= ~0xf;
1643 env->cpuid_version |= value & 0xf;
1646 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1648 X86CPU *cpu = X86_CPU(obj);
1649 CPUX86State *env = &cpu->env;
1650 char *value;
1652 value = g_malloc(CPUID_VENDOR_SZ + 1);
1653 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1654 env->cpuid_vendor3);
1655 return value;
1658 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1659 Error **errp)
1661 X86CPU *cpu = X86_CPU(obj);
1662 CPUX86State *env = &cpu->env;
1663 int i;
1665 if (strlen(value) != CPUID_VENDOR_SZ) {
1666 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1667 return;
1670 env->cpuid_vendor1 = 0;
1671 env->cpuid_vendor2 = 0;
1672 env->cpuid_vendor3 = 0;
1673 for (i = 0; i < 4; i++) {
1674 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1675 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1676 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1680 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1682 X86CPU *cpu = X86_CPU(obj);
1683 CPUX86State *env = &cpu->env;
1684 char *value;
1685 int i;
1687 value = g_malloc(48 + 1);
1688 for (i = 0; i < 48; i++) {
1689 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1691 value[48] = '\0';
1692 return value;
1695 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1696 Error **errp)
1698 X86CPU *cpu = X86_CPU(obj);
1699 CPUX86State *env = &cpu->env;
1700 int c, len, i;
1702 if (model_id == NULL) {
1703 model_id = "";
1705 len = strlen(model_id);
1706 memset(env->cpuid_model, 0, 48);
1707 for (i = 0; i < 48; i++) {
1708 if (i >= len) {
1709 c = '\0';
1710 } else {
1711 c = (uint8_t)model_id[i];
1713 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
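/* Illustrative sketch (not part of the original file): the byte packing that
 * x86_cpuid_set_model_id() performs, storing a model-id string into the twelve
 * 32-bit words later returned through CPUID leaves 0x80000002..0x80000004, plus
 * the reverse unpacking done by the getter. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    const char *model_id = "Example virtual CPU";
    uint32_t cpuid_model[12] = { 0 };
    int len = strlen(model_id);

    for (int i = 0; i < 48; i++) {
        int c = (i >= len) ? '\0' : (uint8_t)model_id[i];
        cpuid_model[i >> 2] |= (uint32_t)c << (8 * (i & 3));
    }

    char out[49];
    for (int i = 0; i < 48; i++) {
        out[i] = cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    out[48] = '\0';
    printf("%s\n", out);
    return 0;
}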
1717 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1718 const char *name, Error **errp)
1720 X86CPU *cpu = X86_CPU(obj);
1721 int64_t value;
1723 value = cpu->env.tsc_khz * 1000;
1724 visit_type_int(v, &value, name, errp);
1727 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1728 const char *name, Error **errp)
1730 X86CPU *cpu = X86_CPU(obj);
1731 const int64_t min = 0;
1732 const int64_t max = INT64_MAX;
1733 Error *local_err = NULL;
1734 int64_t value;
1736 visit_type_int(v, &value, name, &local_err);
1737 if (local_err) {
1738 error_propagate(errp, local_err);
1739 return;
1741 if (value < min || value > max) {
1742 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1743 name ? name : "null", value, min, max);
1744 return;
1747 cpu->env.tsc_khz = value / 1000;
1750 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1751 const char *name, Error **errp)
1753 X86CPU *cpu = X86_CPU(obj);
1754 int64_t value = cpu->apic_id;
1756 visit_type_int(v, &value, name, errp);
1759 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1760 const char *name, Error **errp)
1762 X86CPU *cpu = X86_CPU(obj);
1763 DeviceState *dev = DEVICE(obj);
1764 const int64_t min = 0;
1765 const int64_t max = UINT32_MAX;
1766 Error *error = NULL;
1767 int64_t value;
1769 if (dev->realized) {
1770 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1771 "it was realized", name, object_get_typename(obj));
1772 return;
1775 visit_type_int(v, &value, name, &error);
1776 if (error) {
1777 error_propagate(errp, error);
1778 return;
1780 if (value < min || value > max) {
1781 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1782 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1783 object_get_typename(obj), name, value, min, max);
1784 return;
1787 if ((value != cpu->apic_id) && cpu_exists(value)) {
1788 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1789 return;
1791 cpu->apic_id = value;
1794 /* Generic getter for "feature-words" and "filtered-features" properties */
1795 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1796 const char *name, Error **errp)
1798 uint32_t *array = (uint32_t *)opaque;
1799 FeatureWord w;
1800 Error *err = NULL;
1801 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1802 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1803 X86CPUFeatureWordInfoList *list = NULL;
1805 for (w = 0; w < FEATURE_WORDS; w++) {
1806 FeatureWordInfo *wi = &feature_word_info[w];
1807 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1808 qwi->cpuid_input_eax = wi->cpuid_eax;
1809 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1810 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1811 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1812 qwi->features = array[w];
1814 /* List will be in reverse order, but order shouldn't matter */
1815 list_entries[w].next = list;
1816 list_entries[w].value = &word_infos[w];
1817 list = &list_entries[w];
1820 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1821 error_propagate(errp, err);
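/*
 * Illustrative sketch (assumption, not from the original source): each element
 * of the list serialized above mirrors one FeatureWord. For example, the entry
 * for FEAT_7_0_EBX would look roughly like:
 *
 *     qwi->cpuid_input_eax     == 7;
 *     qwi->has_cpuid_input_ecx == true;
 *     qwi->cpuid_input_ecx     == 0;
 *     qwi->cpuid_register      == X86_CPU_REGISTER32_EBX;  // qapi enum for EBX; constant name assumed
 *     qwi->features            == env->features[FEAT_7_0_EBX];
 */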
1824 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1825 const char *name, Error **errp)
1827 X86CPU *cpu = X86_CPU(obj);
1828 int64_t value = cpu->hyperv_spinlock_attempts;
1830 visit_type_int(v, &value, name, errp);
1833 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1834 const char *name, Error **errp)
1836 const int64_t min = 0xFFF;
1837 const int64_t max = UINT_MAX;
1838 X86CPU *cpu = X86_CPU(obj);
1839 Error *err = NULL;
1840 int64_t value;
1842 visit_type_int(v, &value, name, &err);
1843 if (err) {
1844 error_propagate(errp, err);
1845 return;
1848 if (value < min || value > max) {
1849 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1850 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1851 object_get_typename(obj), name ? name : "null",
1852 value, min, max);
1853 return;
1855 cpu->hyperv_spinlock_attempts = value;
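/*
 * Illustrative sketch (assumption, not from the original source): the setter
 * above only accepts values of at least 0xFFF, the smallest spinlock retry
 * count this code allows, so a typical configuration would be:
 *
 *     object_property_parse(OBJECT(cpu), "8191", "hv-spinlocks", &err);
 *     // cpu->hyperv_spinlock_attempts == 8191 (0x1fff)
 */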
1858 static PropertyInfo qdev_prop_spinlocks = {
1859 .name = "int",
1860 .get = x86_get_hv_spinlocks,
1861 .set = x86_set_hv_spinlocks,
1864 /* Convert all '_' in a feature string option name to '-', to make the feature
1865 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
1866 */
1867 static inline void feat2prop(char *s)
1869 while ((s = strchr(s, '_'))) {
1870 *s = '-';
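/*
 * Illustrative sketch (not part of the original code): feat2prop() rewrites
 * the string in place, e.g.:
 *
 *     char name[] = "lahf_lm";
 *     feat2prop(name);        // name is now "lahf-lm"
 */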
1874 /* Parse "+feature,-feature,feature=foo" CPU feature string
1875 */
1876 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1877 Error **errp)
1879 X86CPU *cpu = X86_CPU(cs);
1880 char *featurestr; /* Single "key=value" string being parsed */
1881 FeatureWord w;
1882 /* Features to be added */
1883 FeatureWordArray plus_features = { 0 };
1884 /* Features to be removed */
1885 FeatureWordArray minus_features = { 0 };
1886 uint32_t numvalue;
1887 CPUX86State *env = &cpu->env;
1888 Error *local_err = NULL;
1890 featurestr = features ? strtok(features, ",") : NULL;
1892 while (featurestr) {
1893 char *val;
1894 if (featurestr[0] == '+') {
1895 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1896 } else if (featurestr[0] == '-') {
1897 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1898 } else if ((val = strchr(featurestr, '='))) {
1899 *val = 0; val++;
1900 feat2prop(featurestr);
1901 if (!strcmp(featurestr, "xlevel")) {
1902 char *err;
1903 char num[32];
1905 numvalue = strtoul(val, &err, 0);
1906 if (!*val || *err) {
1907 error_setg(errp, "bad numerical value %s", val);
1908 return;
1910 if (numvalue < 0x80000000) {
1911 error_report("xlevel value shall always be >= 0x80000000"
1912 ", fixup will be removed in future versions");
1913 numvalue += 0x80000000;
1915 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1916 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1917 } else if (!strcmp(featurestr, "tsc-freq")) {
1918 int64_t tsc_freq;
1919 char *err;
1920 char num[32];
1922 tsc_freq = strtosz_suffix_unit(val, &err,
1923 STRTOSZ_DEFSUFFIX_B, 1000);
1924 if (tsc_freq < 0 || *err) {
1925 error_setg(errp, "bad numerical value %s", val);
1926 return;
1928 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1929 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1930 &local_err);
1931 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1932 char *err;
1933 const int min = 0xFFF;
1934 char num[32];
1935 numvalue = strtoul(val, &err, 0);
1936 if (!*val || *err) {
1937 error_setg(errp, "bad numerical value %s", val);
1938 return;
1940 if (numvalue < min) {
1941 error_report("hv-spinlocks value shall always be >= 0x%x"
1942 ", fixup will be removed in future versions",
1943 min);
1944 numvalue = min;
1946 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1947 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1948 } else {
1949 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1951 } else {
1952 feat2prop(featurestr);
1953 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1955 if (local_err) {
1956 error_propagate(errp, local_err);
1957 return;
1959 featurestr = strtok(NULL, ",");
1962 if (cpu->host_features) {
1963 for (w = 0; w < FEATURE_WORDS; w++) {
1964 env->features[w] =
1965 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1969 for (w = 0; w < FEATURE_WORDS; w++) {
1970 env->features[w] |= plus_features[w];
1971 env->features[w] &= ~minus_features[w];
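/*
 * Illustrative sketch (assumption, not from the original source): for a
 * string such as "+avx,-sse3,xlevel=0x8000000A", the loop above records 'avx'
 * in plus_features, 'sse3' in minus_features, and applies the "xlevel"
 * property; the final two loops then OR in the '+' bits and clear the '-'
 * bits on top of the model (or host) defaults.  The string must be writable
 * because strtok() modifies it:
 *
 *     char *s = g_strdup("+avx,-sse3,xlevel=0x8000000A");
 *     x86_cpu_parse_featurestr(CPU(cpu), s, &error_abort);
 *     g_free(s);
 */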
1975 /* Print all cpuid feature names in featureset
1976 */
1977 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1979 int bit;
1980 bool first = true;
1982 for (bit = 0; bit < 32; bit++) {
1983 if (featureset[bit]) {
1984 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1985 first = false;
1990 /* generate CPU information. */
1991 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1993 X86CPUDefinition *def;
1994 char buf[256];
1995 int i;
1997 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1998 def = &builtin_x86_defs[i];
1999 snprintf(buf, sizeof(buf), "%s", def->name);
2000 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2002 #ifdef CONFIG_KVM
2003 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2004 "KVM processor with all supported host features "
2005 "(only available in KVM mode)");
2006 #endif
2008 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2009 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2010 FeatureWordInfo *fw = &feature_word_info[i];
2012 (*cpu_fprintf)(f, " ");
2013 listflags(f, cpu_fprintf, fw->feat_names);
2014 (*cpu_fprintf)(f, "\n");
2018 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2020 CpuDefinitionInfoList *cpu_list = NULL;
2021 X86CPUDefinition *def;
2022 int i;
2024 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2025 CpuDefinitionInfoList *entry;
2026 CpuDefinitionInfo *info;
2028 def = &builtin_x86_defs[i];
2029 info = g_malloc0(sizeof(*info));
2030 info->name = g_strdup(def->name);
2032 entry = g_malloc0(sizeof(*entry));
2033 entry->value = info;
2034 entry->next = cpu_list;
2035 cpu_list = entry;
2038 return cpu_list;
2041 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2042 bool migratable_only)
2044 FeatureWordInfo *wi = &feature_word_info[w];
2045 uint32_t r;
2047 if (kvm_enabled()) {
2048 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2049 wi->cpuid_ecx,
2050 wi->cpuid_reg);
2051 } else if (tcg_enabled()) {
2052 r = wi->tcg_features;
2053 } else {
2054 return ~0;
2056 if (migratable_only) {
2057 r &= x86_cpu_get_migratable_flags(w);
2059 return r;
2062 /*
2063 * Filters CPU feature words based on host availability of each feature.
2064 *
2065 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2066 */
2067 static int x86_cpu_filter_features(X86CPU *cpu)
2069 CPUX86State *env = &cpu->env;
2070 FeatureWord w;
2071 int rv = 0;
2073 for (w = 0; w < FEATURE_WORDS; w++) {
2074 uint32_t host_feat =
2075 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2076 uint32_t requested_features = env->features[w];
2077 env->features[w] &= host_feat;
2078 cpu->filtered_features[w] = requested_features & ~env->features[w];
2079 if (cpu->filtered_features[w]) {
2080 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2081 report_unavailable_features(w, cpu->filtered_features[w]);
2083 rv = 1;
2087 return rv;
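/*
 * Illustrative sketch (assumption, not from the original source): if the
 * guest model requests a bit the host/accelerator cannot provide, the bit is
 * dropped from env->features[] and remembered in filtered_features[]:
 *
 *     // requested: env->features[FEAT_1_ECX] = CPUID_EXT_AVX | CPUID_EXT_X2APIC
 *     // host_feat:                             CPUID_EXT_AVX
 *     // after filtering:
 *     //   env->features[FEAT_1_ECX]          == CPUID_EXT_AVX
 *     //   cpu->filtered_features[FEAT_1_ECX] == CPUID_EXT_X2APIC
 *     //   x86_cpu_filter_features() returns 1
 */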
2090 /* Load data from X86CPUDefinition
2091 */
2092 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2094 CPUX86State *env = &cpu->env;
2095 const char *vendor;
2096 char host_vendor[CPUID_VENDOR_SZ + 1];
2097 FeatureWord w;
2099 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2100 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2101 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2102 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2103 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2104 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2105 cpu->cache_info_passthrough = def->cache_info_passthrough;
2106 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2107 for (w = 0; w < FEATURE_WORDS; w++) {
2108 env->features[w] = def->features[w];
2111 /* Special cases not set in the X86CPUDefinition structs: */
2112 if (kvm_enabled()) {
2113 FeatureWord w;
2114 for (w = 0; w < FEATURE_WORDS; w++) {
2115 env->features[w] |= kvm_default_features[w];
2116 env->features[w] &= ~kvm_default_unset_features[w];
2120 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2122 /* sysenter isn't supported in compatibility mode on AMD,
2123 * syscall isn't supported in compatibility mode on Intel.
2124 * Normally we advertise the actual CPU vendor, but you can
2125 * override this using the 'vendor' property if you want to use
2126 * KVM's sysenter/syscall emulation in compatibility mode and
2127 * when doing cross vendor migration.
2128 */
2129 vendor = def->vendor;
2130 if (kvm_enabled()) {
2131 uint32_t ebx = 0, ecx = 0, edx = 0;
2132 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2133 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2134 vendor = host_vendor;
2137 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
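/*
 * Illustrative sketch (assumption, not from the original source): per the
 * comment above, the vendor chosen here can still be overridden, e.g. to keep
 * an AMD guest CPUID vendor while running on an Intel KVM host:
 *
 *     object_property_set_str(OBJECT(cpu), "AuthenticAMD", "vendor", &err);
 *
 * The string must be exactly CPUID_VENDOR_SZ (12) characters, as enforced by
 * x86_cpuid_set_vendor().
 */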
2141 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2143 X86CPU *cpu = NULL;
2144 X86CPUClass *xcc;
2145 ObjectClass *oc;
2146 gchar **model_pieces;
2147 char *name, *features;
2148 Error *error = NULL;
2150 model_pieces = g_strsplit(cpu_model, ",", 2);
2151 if (!model_pieces[0]) {
2152 error_setg(&error, "Invalid/empty CPU model name");
2153 goto out;
2155 name = model_pieces[0];
2156 features = model_pieces[1];
2158 oc = x86_cpu_class_by_name(name);
2159 if (oc == NULL) {
2160 error_setg(&error, "Unable to find CPU definition: %s", name);
2161 goto out;
2163 xcc = X86_CPU_CLASS(oc);
2165 if (xcc->kvm_required && !kvm_enabled()) {
2166 error_setg(&error, "CPU model '%s' requires KVM", name);
2167 goto out;
2170 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2172 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2173 if (error) {
2174 goto out;
2177 out:
2178 if (error != NULL) {
2179 error_propagate(errp, error);
2180 if (cpu) {
2181 object_unref(OBJECT(cpu));
2182 cpu = NULL;
2185 g_strfreev(model_pieces);
2186 return cpu;
2189 X86CPU *cpu_x86_init(const char *cpu_model)
2191 Error *error = NULL;
2192 X86CPU *cpu;
2194 cpu = cpu_x86_create(cpu_model, &error);
2195 if (error) {
2196 goto out;
2199 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2201 out:
2202 if (error) {
2203 error_report_err(error);
2204 if (cpu != NULL) {
2205 object_unref(OBJECT(cpu));
2206 cpu = NULL;
2209 return cpu;
2212 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2214 X86CPUDefinition *cpudef = data;
2215 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2217 xcc->cpu_def = cpudef;
2220 static void x86_register_cpudef_type(X86CPUDefinition *def)
2222 char *typename = x86_cpu_type_name(def->name);
2223 TypeInfo ti = {
2224 .name = typename,
2225 .parent = TYPE_X86_CPU,
2226 .class_init = x86_cpu_cpudef_class_init,
2227 .class_data = def,
2230 type_register(&ti);
2231 g_free(typename);
2234 #if !defined(CONFIG_USER_ONLY)
2236 void cpu_clear_apic_feature(CPUX86State *env)
2238 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2241 #endif /* !CONFIG_USER_ONLY */
2243 /* Initialize list of CPU models, filling some non-static fields if necessary
2244 */
2245 void x86_cpudef_setup(void)
2247 int i, j;
2248 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2250 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2251 X86CPUDefinition *def = &builtin_x86_defs[i];
2253 /* Look for specific "cpudef" models that */
2254 /* have the QEMU version in .model_id */
2255 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2256 if (strcmp(model_with_versions[j], def->name) == 0) {
2257 pstrcpy(def->model_id, sizeof(def->model_id),
2258 "QEMU Virtual CPU version ");
2259 pstrcat(def->model_id, sizeof(def->model_id),
2260 qemu_get_version());
2261 break;
2267 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2268 uint32_t *eax, uint32_t *ebx,
2269 uint32_t *ecx, uint32_t *edx)
2271 X86CPU *cpu = x86_env_get_cpu(env);
2272 CPUState *cs = CPU(cpu);
2274 /* test if maximum index reached */
2275 if (index & 0x80000000) {
2276 if (index > env->cpuid_xlevel) {
2277 if (env->cpuid_xlevel2 > 0) {
2278 /* Handle the Centaur's CPUID instruction. */
2279 if (index > env->cpuid_xlevel2) {
2280 index = env->cpuid_xlevel2;
2281 } else if (index < 0xC0000000) {
2282 index = env->cpuid_xlevel;
2284 } else {
2285 /* Intel documentation states that invalid EAX input will
2286 * return the same information as EAX=cpuid_level
2287 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2288 */
2289 index = env->cpuid_level;
2292 } else {
2293 if (index > env->cpuid_level)
2294 index = env->cpuid_level;
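/*
 * Illustrative sketch (not part of the original code): out-of-range requests
 * are clamped before the switch below.  For example, with cpuid_level = 0xD,
 * cpuid_xlevel = 0x8000000A and cpuid_xlevel2 = 0:
 *
 *     cpu_x86_cpuid(env, 0x12, 0, &eax, &ebx, &ecx, &edx);        // treated as 0xD
 *     cpu_x86_cpuid(env, 0x80000020, 0, &eax, &ebx, &ecx, &edx);  // also 0xD, per the Intel rule above
 */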
2297 switch(index) {
2298 case 0:
2299 *eax = env->cpuid_level;
2300 *ebx = env->cpuid_vendor1;
2301 *edx = env->cpuid_vendor2;
2302 *ecx = env->cpuid_vendor3;
2303 break;
2304 case 1:
2305 *eax = env->cpuid_version;
2306 *ebx = (cpu->apic_id << 24) |
2307 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2308 *ecx = env->features[FEAT_1_ECX];
2309 *edx = env->features[FEAT_1_EDX];
2310 if (cs->nr_cores * cs->nr_threads > 1) {
2311 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2312 *edx |= 1 << 28; /* HTT bit */
2314 break;
2315 case 2:
2316 /* cache info: needed for Pentium Pro compatibility */
2317 if (cpu->cache_info_passthrough) {
2318 host_cpuid(index, 0, eax, ebx, ecx, edx);
2319 break;
2321 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2322 *ebx = 0;
2323 *ecx = 0;
2324 *edx = (L1D_DESCRIPTOR << 16) | \
2325 (L1I_DESCRIPTOR << 8) | \
2326 (L2_DESCRIPTOR);
2327 break;
2328 case 4:
2329 /* cache info: needed for Core compatibility */
2330 if (cpu->cache_info_passthrough) {
2331 host_cpuid(index, count, eax, ebx, ecx, edx);
2332 *eax &= ~0xFC000000;
2333 } else {
2334 *eax = 0;
2335 switch (count) {
2336 case 0: /* L1 dcache info */
2337 *eax |= CPUID_4_TYPE_DCACHE | \
2338 CPUID_4_LEVEL(1) | \
2339 CPUID_4_SELF_INIT_LEVEL;
2340 *ebx = (L1D_LINE_SIZE - 1) | \
2341 ((L1D_PARTITIONS - 1) << 12) | \
2342 ((L1D_ASSOCIATIVITY - 1) << 22);
2343 *ecx = L1D_SETS - 1;
2344 *edx = CPUID_4_NO_INVD_SHARING;
2345 break;
2346 case 1: /* L1 icache info */
2347 *eax |= CPUID_4_TYPE_ICACHE | \
2348 CPUID_4_LEVEL(1) | \
2349 CPUID_4_SELF_INIT_LEVEL;
2350 *ebx = (L1I_LINE_SIZE - 1) | \
2351 ((L1I_PARTITIONS - 1) << 12) | \
2352 ((L1I_ASSOCIATIVITY - 1) << 22);
2353 *ecx = L1I_SETS - 1;
2354 *edx = CPUID_4_NO_INVD_SHARING;
2355 break;
2356 case 2: /* L2 cache info */
2357 *eax |= CPUID_4_TYPE_UNIFIED | \
2358 CPUID_4_LEVEL(2) | \
2359 CPUID_4_SELF_INIT_LEVEL;
2360 if (cs->nr_threads > 1) {
2361 *eax |= (cs->nr_threads - 1) << 14;
2363 *ebx = (L2_LINE_SIZE - 1) | \
2364 ((L2_PARTITIONS - 1) << 12) | \
2365 ((L2_ASSOCIATIVITY - 1) << 22);
2366 *ecx = L2_SETS - 1;
2367 *edx = CPUID_4_NO_INVD_SHARING;
2368 break;
2369 default: /* end of info */
2370 *eax = 0;
2371 *ebx = 0;
2372 *ecx = 0;
2373 *edx = 0;
2374 break;
2378 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2379 if ((*eax & 31) && cs->nr_cores > 1) {
2380 *eax |= (cs->nr_cores - 1) << 26;
2382 break;
2383 case 5:
2384 /* mwait info: needed for Core compatibility */
2385 *eax = 0; /* Smallest monitor-line size in bytes */
2386 *ebx = 0; /* Largest monitor-line size in bytes */
2387 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2388 *edx = 0;
2389 break;
2390 case 6:
2391 /* Thermal and Power Leaf */
2392 *eax = env->features[FEAT_6_EAX];
2393 *ebx = 0;
2394 *ecx = 0;
2395 *edx = 0;
2396 break;
2397 case 7:
2398 /* Structured Extended Feature Flags Enumeration Leaf */
2399 if (count == 0) {
2400 *eax = 0; /* Maximum ECX value for sub-leaves */
2401 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2402 *ecx = 0; /* Reserved */
2403 *edx = 0; /* Reserved */
2404 } else {
2405 *eax = 0;
2406 *ebx = 0;
2407 *ecx = 0;
2408 *edx = 0;
2410 break;
2411 case 9:
2412 /* Direct Cache Access Information Leaf */
2413 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2414 *ebx = 0;
2415 *ecx = 0;
2416 *edx = 0;
2417 break;
2418 case 0xA:
2419 /* Architectural Performance Monitoring Leaf */
2420 if (kvm_enabled() && cpu->enable_pmu) {
2421 KVMState *s = cs->kvm_state;
2423 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2424 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2425 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2426 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2427 } else {
2428 *eax = 0;
2429 *ebx = 0;
2430 *ecx = 0;
2431 *edx = 0;
2433 break;
2434 case 0xD: {
2435 KVMState *s = cs->kvm_state;
2436 uint64_t kvm_mask;
2437 int i;
2439 /* Processor Extended State */
2440 *eax = 0;
2441 *ebx = 0;
2442 *ecx = 0;
2443 *edx = 0;
2444 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2445 break;
2447 kvm_mask =
2448 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2449 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2451 if (count == 0) {
2452 *ecx = 0x240;
2453 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2454 const ExtSaveArea *esa = &ext_save_areas[i];
2455 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2456 (kvm_mask & (1 << i)) != 0) {
2457 if (i < 32) {
2458 *eax |= 1 << i;
2459 } else {
2460 *edx |= 1 << (i - 32);
2462 *ecx = MAX(*ecx, esa->offset + esa->size);
2465 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2466 *ebx = *ecx;
2467 } else if (count == 1) {
2468 *eax = env->features[FEAT_XSAVE];
2469 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2470 const ExtSaveArea *esa = &ext_save_areas[count];
2471 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2472 (kvm_mask & (1 << count)) != 0) {
2473 *eax = esa->size;
2474 *ebx = esa->offset;
2477 break;
2479 case 0x80000000:
2480 *eax = env->cpuid_xlevel;
2481 *ebx = env->cpuid_vendor1;
2482 *edx = env->cpuid_vendor2;
2483 *ecx = env->cpuid_vendor3;
2484 break;
2485 case 0x80000001:
2486 *eax = env->cpuid_version;
2487 *ebx = 0;
2488 *ecx = env->features[FEAT_8000_0001_ECX];
2489 *edx = env->features[FEAT_8000_0001_EDX];
2491 /* The Linux kernel checks for the CMPLegacy bit and
2492 * discards multiple thread information if it is set.
2493 * So don't set it here for Intel, to keep Linux guests happy.
2494 */
2495 if (cs->nr_cores * cs->nr_threads > 1) {
2496 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2497 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2498 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2499 *ecx |= 1 << 1; /* CmpLegacy bit */
2502 break;
2503 case 0x80000002:
2504 case 0x80000003:
2505 case 0x80000004:
2506 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2507 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2508 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2509 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2510 break;
2511 case 0x80000005:
2512 /* cache info (L1 cache) */
2513 if (cpu->cache_info_passthrough) {
2514 host_cpuid(index, 0, eax, ebx, ecx, edx);
2515 break;
2517 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2518 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2519 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2520 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2521 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2522 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2523 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2524 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2525 break;
2526 case 0x80000006:
2527 /* cache info (L2 cache) */
2528 if (cpu->cache_info_passthrough) {
2529 host_cpuid(index, 0, eax, ebx, ecx, edx);
2530 break;
2532 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2533 (L2_DTLB_2M_ENTRIES << 16) | \
2534 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2535 (L2_ITLB_2M_ENTRIES);
2536 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2537 (L2_DTLB_4K_ENTRIES << 16) | \
2538 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2539 (L2_ITLB_4K_ENTRIES);
2540 *ecx = (L2_SIZE_KB_AMD << 16) | \
2541 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2542 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2543 *edx = ((L3_SIZE_KB/512) << 18) | \
2544 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2545 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2546 break;
2547 case 0x80000007:
2548 *eax = 0;
2549 *ebx = 0;
2550 *ecx = 0;
2551 *edx = env->features[FEAT_8000_0007_EDX];
2552 break;
2553 case 0x80000008:
2554 /* virtual & phys address size in low 2 bytes. */
2555 /* XXX: This value must match the one used in the MMU code. */
2556 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2557 /* 64 bit processor */
2558 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2559 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2560 } else {
2561 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2562 *eax = 0x00000024; /* 36 bits physical */
2563 } else {
2564 *eax = 0x00000020; /* 32 bits physical */
2567 *ebx = 0;
2568 *ecx = 0;
2569 *edx = 0;
2570 if (cs->nr_cores * cs->nr_threads > 1) {
2571 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2573 break;
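/*
 * Illustrative sketch (not part of the original code): CPUID.80000008H:EAX
 * encodes the physical address width in bits 7:0 and the linear (virtual)
 * address width in bits 15:8, so the values used above decode as:
 *
 *     // 0x00003028: physical = 0x28 = 40 bits, virtual = 0x30 = 48 bits
 *     // 0x00000024: physical = 0x24 = 36 bits (PSE36-capable 32-bit CPU)
 *     // 0x00000020: physical = 0x20 = 32 bits
 */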
2574 case 0x8000000A:
2575 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2576 *eax = 0x00000001; /* SVM Revision */
2577 *ebx = 0x00000010; /* nr of ASIDs */
2578 *ecx = 0;
2579 *edx = env->features[FEAT_SVM]; /* optional features */
2580 } else {
2581 *eax = 0;
2582 *ebx = 0;
2583 *ecx = 0;
2584 *edx = 0;
2586 break;
2587 case 0xC0000000:
2588 *eax = env->cpuid_xlevel2;
2589 *ebx = 0;
2590 *ecx = 0;
2591 *edx = 0;
2592 break;
2593 case 0xC0000001:
2594 /* Support for VIA CPU's CPUID instruction */
2595 *eax = env->cpuid_version;
2596 *ebx = 0;
2597 *ecx = 0;
2598 *edx = env->features[FEAT_C000_0001_EDX];
2599 break;
2600 case 0xC0000002:
2601 case 0xC0000003:
2602 case 0xC0000004:
2603 /* Reserved for future use; currently filled with zero */
2604 *eax = 0;
2605 *ebx = 0;
2606 *ecx = 0;
2607 *edx = 0;
2608 break;
2609 default:
2610 /* reserved values: zero */
2611 *eax = 0;
2612 *ebx = 0;
2613 *ecx = 0;
2614 *edx = 0;
2615 break;
2619 /* CPUClass::reset() */
2620 static void x86_cpu_reset(CPUState *s)
2622 X86CPU *cpu = X86_CPU(s);
2623 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2624 CPUX86State *env = &cpu->env;
2625 int i;
2627 xcc->parent_reset(s);
2629 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2631 tlb_flush(s, 1);
2633 env->old_exception = -1;
2635 /* init to reset state */
2637 #ifdef CONFIG_SOFTMMU
2638 env->hflags |= HF_SOFTMMU_MASK;
2639 #endif
2640 env->hflags2 |= HF2_GIF_MASK;
2642 cpu_x86_update_cr0(env, 0x60000010);
2643 env->a20_mask = ~0x0;
2644 env->smbase = 0x30000;
2646 env->idt.limit = 0xffff;
2647 env->gdt.limit = 0xffff;
2648 env->ldt.limit = 0xffff;
2649 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2650 env->tr.limit = 0xffff;
2651 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2653 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2654 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2655 DESC_R_MASK | DESC_A_MASK);
2656 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2657 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2658 DESC_A_MASK);
2659 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2660 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2661 DESC_A_MASK);
2662 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2663 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2664 DESC_A_MASK);
2665 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2666 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2667 DESC_A_MASK);
2668 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2669 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2670 DESC_A_MASK);
2672 env->eip = 0xfff0;
2673 env->regs[R_EDX] = env->cpuid_version;
2675 env->eflags = 0x2;
2677 /* FPU init */
2678 for (i = 0; i < 8; i++) {
2679 env->fptags[i] = 1;
2681 cpu_set_fpuc(env, 0x37f);
2683 env->mxcsr = 0x1f80;
2684 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2686 env->pat = 0x0007040600070406ULL;
2687 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2689 memset(env->dr, 0, sizeof(env->dr));
2690 env->dr[6] = DR6_FIXED_1;
2691 env->dr[7] = DR7_FIXED_1;
2692 cpu_breakpoint_remove_all(s, BP_CPU);
2693 cpu_watchpoint_remove_all(s, BP_CPU);
2695 env->xcr0 = 1;
2697 /*
2698 * SDM 11.11.5 requires:
2699 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2700 * - IA32_MTRR_PHYSMASKn.V = 0
2701 * All other bits are undefined. For simplification, zero it all.
2702 */
2703 env->mtrr_deftype = 0;
2704 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2705 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2707 #if !defined(CONFIG_USER_ONLY)
2708 /* We hard-wire the BSP to the first CPU. */
2709 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2711 s->halted = !cpu_is_bsp(cpu);
2713 if (kvm_enabled()) {
2714 kvm_arch_reset_vcpu(cpu);
2716 #endif
2719 #ifndef CONFIG_USER_ONLY
2720 bool cpu_is_bsp(X86CPU *cpu)
2722 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2725 /* TODO: remove me, when reset over QOM tree is implemented */
2726 static void x86_cpu_machine_reset_cb(void *opaque)
2728 X86CPU *cpu = opaque;
2729 cpu_reset(CPU(cpu));
2731 #endif
2733 static void mce_init(X86CPU *cpu)
2735 CPUX86State *cenv = &cpu->env;
2736 unsigned int bank;
2738 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2739 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2740 (CPUID_MCE | CPUID_MCA)) {
2741 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2742 cenv->mcg_ctl = ~(uint64_t)0;
2743 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2744 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2749 #ifndef CONFIG_USER_ONLY
2750 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2752 DeviceState *dev = DEVICE(cpu);
2753 APICCommonState *apic;
2754 const char *apic_type = "apic";
2756 if (kvm_irqchip_in_kernel()) {
2757 apic_type = "kvm-apic";
2758 } else if (xen_enabled()) {
2759 apic_type = "xen-apic";
2762 cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
2763 if (cpu->apic_state == NULL) {
2764 error_setg(errp, "APIC device '%s' could not be created", apic_type);
2765 return;
2768 object_property_add_child(OBJECT(cpu), "apic",
2769 OBJECT(cpu->apic_state), NULL);
2770 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2771 /* TODO: convert to link<> */
2772 apic = APIC_COMMON(cpu->apic_state);
2773 apic->cpu = cpu;
2776 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2778 if (cpu->apic_state == NULL) {
2779 return;
2781 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2782 errp);
2785 static void x86_cpu_machine_done(Notifier *n, void *unused)
2787 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2788 MemoryRegion *smram =
2789 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2791 if (smram) {
2792 cpu->smram = g_new(MemoryRegion, 1);
2793 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2794 smram, 0, 1ull << 32);
2795 memory_region_set_enabled(cpu->smram, false);
2796 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2799 #else
2800 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2803 #endif
2806 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2807 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2808 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2809 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2810 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2811 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2812 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2814 CPUState *cs = CPU(dev);
2815 X86CPU *cpu = X86_CPU(dev);
2816 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2817 CPUX86State *env = &cpu->env;
2818 Error *local_err = NULL;
2819 static bool ht_warned;
2821 if (cpu->apic_id < 0) {
2822 error_setg(errp, "apic-id property was not initialized properly");
2823 return;
2826 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2827 env->cpuid_level = 7;
2830 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2831 * CPUID[1].EDX.
2832 */
2833 if (IS_AMD_CPU(env)) {
2834 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2835 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2836 & CPUID_EXT2_AMD_ALIASES);
2840 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2841 error_setg(&local_err,
2842 kvm_enabled() ?
2843 "Host doesn't support requested features" :
2844 "TCG doesn't support requested features");
2845 goto out;
2848 #ifndef CONFIG_USER_ONLY
2849 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2851 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2852 x86_cpu_apic_create(cpu, &local_err);
2853 if (local_err != NULL) {
2854 goto out;
2857 #endif
2859 mce_init(cpu);
2861 #ifndef CONFIG_USER_ONLY
2862 if (tcg_enabled()) {
2863 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2864 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2865 cs->as = g_new(AddressSpace, 1);
2867 /* Outer container... */
2868 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2869 memory_region_set_enabled(cpu->cpu_as_root, true);
2871 /* ... with two regions inside: normal system memory with low
2872 * priority, and...
2873 */
2874 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2875 get_system_memory(), 0, ~0ull);
2876 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2877 memory_region_set_enabled(cpu->cpu_as_mem, true);
2878 address_space_init(cs->as, cpu->cpu_as_root, "CPU");
2880 /* ... SMRAM with higher priority, linked from /machine/smram. */
2881 cpu->machine_done.notify = x86_cpu_machine_done;
2882 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2884 #endif
2886 qemu_init_vcpu(cs);
2888 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2889 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2890 * based on inputs (sockets, cores, threads), it is still better to give
2891 * users a warning.
2892 *
2893 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2894 * cs->nr_threads hasn't been populated yet and the check is incorrect.
2895 */
2896 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2897 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2898 " -smp options properly.");
2899 ht_warned = true;
2902 x86_cpu_apic_realize(cpu, &local_err);
2903 if (local_err != NULL) {
2904 goto out;
2906 cpu_reset(cs);
2908 xcc->parent_realize(dev, &local_err);
2910 out:
2911 if (local_err != NULL) {
2912 error_propagate(errp, local_err);
2913 return;
2917 typedef struct BitProperty {
2918 uint32_t *ptr;
2919 uint32_t mask;
2920 } BitProperty;
2922 static void x86_cpu_get_bit_prop(Object *obj,
2923 struct Visitor *v,
2924 void *opaque,
2925 const char *name,
2926 Error **errp)
2928 BitProperty *fp = opaque;
2929 bool value = (*fp->ptr & fp->mask) == fp->mask;
2930 visit_type_bool(v, &value, name, errp);
2933 static void x86_cpu_set_bit_prop(Object *obj,
2934 struct Visitor *v,
2935 void *opaque,
2936 const char *name,
2937 Error **errp)
2939 DeviceState *dev = DEVICE(obj);
2940 BitProperty *fp = opaque;
2941 Error *local_err = NULL;
2942 bool value;
2944 if (dev->realized) {
2945 qdev_prop_set_after_realize(dev, name, errp);
2946 return;
2949 visit_type_bool(v, &value, name, &local_err);
2950 if (local_err) {
2951 error_propagate(errp, local_err);
2952 return;
2955 if (value) {
2956 *fp->ptr |= fp->mask;
2957 } else {
2958 *fp->ptr &= ~fp->mask;
2962 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
2963 void *opaque)
2965 BitProperty *prop = opaque;
2966 g_free(prop);
2969 /* Register a boolean property to get/set a single bit in a uint32_t field.
2970 *
2971 * The same property name can be registered multiple times to make it affect
2972 * multiple bits in the same FeatureWord. In that case, the getter will return
2973 * true only if all bits are set.
2974 */
2975 static void x86_cpu_register_bit_prop(X86CPU *cpu,
2976 const char *prop_name,
2977 uint32_t *field,
2978 int bitnr)
2980 BitProperty *fp;
2981 ObjectProperty *op;
2982 uint32_t mask = (1UL << bitnr);
2984 op = object_property_find(OBJECT(cpu), prop_name, NULL);
2985 if (op) {
2986 fp = op->opaque;
2987 assert(fp->ptr == field);
2988 fp->mask |= mask;
2989 } else {
2990 fp = g_new0(BitProperty, 1);
2991 fp->ptr = field;
2992 fp->mask = mask;
2993 object_property_add(OBJECT(cpu), prop_name, "bool",
2994 x86_cpu_get_bit_prop,
2995 x86_cpu_set_bit_prop,
2996 x86_cpu_release_bit_prop, fp, &error_abort);
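/*
 * Illustrative sketch (assumption, not from the original source): registering
 * the same property name for two different bits of one feature word merges
 * their masks, and the getter then reports true only when all of them are set:
 *
 *     x86_cpu_register_bit_prop(cpu, "demo-feat", &cpu->env.features[FEAT_1_EDX], 3);
 *     x86_cpu_register_bit_prop(cpu, "demo-feat", &cpu->env.features[FEAT_1_EDX], 5);
 *     // the BitProperty mask is now (1 << 3) | (1 << 5) == 0x28;
 *     // "demo-feat" reads as true only if both bits are set
 *
 * ("demo-feat" is a hypothetical name used only for this example.)
 */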
3000 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3001 FeatureWord w,
3002 int bitnr)
3004 Object *obj = OBJECT(cpu);
3005 int i;
3006 char **names;
3007 FeatureWordInfo *fi = &feature_word_info[w];
3009 if (!fi->feat_names) {
3010 return;
3012 if (!fi->feat_names[bitnr]) {
3013 return;
3016 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3018 feat2prop(names[0]);
3019 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3021 for (i = 1; names[i]; i++) {
3022 feat2prop(names[i]);
3023 object_property_add_alias(obj, names[i], obj, names[0],
3024 &error_abort);
3027 g_strfreev(names);
3030 static void x86_cpu_initfn(Object *obj)
3032 CPUState *cs = CPU(obj);
3033 X86CPU *cpu = X86_CPU(obj);
3034 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3035 CPUX86State *env = &cpu->env;
3036 FeatureWord w;
3037 static int inited;
3039 cs->env_ptr = env;
3040 cpu_exec_init(cs, &error_abort);
3042 object_property_add(obj, "family", "int",
3043 x86_cpuid_version_get_family,
3044 x86_cpuid_version_set_family, NULL, NULL, NULL);
3045 object_property_add(obj, "model", "int",
3046 x86_cpuid_version_get_model,
3047 x86_cpuid_version_set_model, NULL, NULL, NULL);
3048 object_property_add(obj, "stepping", "int",
3049 x86_cpuid_version_get_stepping,
3050 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3051 object_property_add_str(obj, "vendor",
3052 x86_cpuid_get_vendor,
3053 x86_cpuid_set_vendor, NULL);
3054 object_property_add_str(obj, "model-id",
3055 x86_cpuid_get_model_id,
3056 x86_cpuid_set_model_id, NULL);
3057 object_property_add(obj, "tsc-frequency", "int",
3058 x86_cpuid_get_tsc_freq,
3059 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3060 object_property_add(obj, "apic-id", "int",
3061 x86_cpuid_get_apic_id,
3062 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3063 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3064 x86_cpu_get_feature_words,
3065 NULL, NULL, (void *)env->features, NULL);
3066 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3067 x86_cpu_get_feature_words,
3068 NULL, NULL, (void *)cpu->filtered_features, NULL);
3070 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3072 #ifndef CONFIG_USER_ONLY
3073 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3074 cpu->apic_id = -1;
3075 #endif
3077 for (w = 0; w < FEATURE_WORDS; w++) {
3078 int bitnr;
3080 for (bitnr = 0; bitnr < 32; bitnr++) {
3081 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3085 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3087 /* init various static tables used in TCG mode */
3088 if (tcg_enabled() && !inited) {
3089 inited = 1;
3090 optimize_flags_init();
3094 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3096 X86CPU *cpu = X86_CPU(cs);
3098 return cpu->apic_id;
3101 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3103 X86CPU *cpu = X86_CPU(cs);
3105 return cpu->env.cr[0] & CR0_PG_MASK;
3108 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3110 X86CPU *cpu = X86_CPU(cs);
3112 cpu->env.eip = value;
3115 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3117 X86CPU *cpu = X86_CPU(cs);
3119 cpu->env.eip = tb->pc - tb->cs_base;
3122 static bool x86_cpu_has_work(CPUState *cs)
3124 X86CPU *cpu = X86_CPU(cs);
3125 CPUX86State *env = &cpu->env;
3127 #if !defined(CONFIG_USER_ONLY)
3128 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
3129 apic_poll_irq(cpu->apic_state);
3130 cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
3132 #endif
3134 return ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
3135 (env->eflags & IF_MASK)) ||
3136 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3137 CPU_INTERRUPT_INIT |
3138 CPU_INTERRUPT_SIPI |
3139 CPU_INTERRUPT_MCE)) ||
3140 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3141 !(env->hflags & HF_SMM_MASK));
3144 static Property x86_cpu_properties[] = {
3145 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3146 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3147 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3148 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3149 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3150 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
3151 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3152 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3153 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3154 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3155 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3156 DEFINE_PROP_END_OF_LIST()
3159 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3161 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3162 CPUClass *cc = CPU_CLASS(oc);
3163 DeviceClass *dc = DEVICE_CLASS(oc);
3165 xcc->parent_realize = dc->realize;
3166 dc->realize = x86_cpu_realizefn;
3167 dc->bus_type = TYPE_ICC_BUS;
3168 dc->props = x86_cpu_properties;
3170 xcc->parent_reset = cc->reset;
3171 cc->reset = x86_cpu_reset;
3172 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3174 cc->class_by_name = x86_cpu_class_by_name;
3175 cc->parse_features = x86_cpu_parse_featurestr;
3176 cc->has_work = x86_cpu_has_work;
3177 cc->do_interrupt = x86_cpu_do_interrupt;
3178 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3179 cc->dump_state = x86_cpu_dump_state;
3180 cc->set_pc = x86_cpu_set_pc;
3181 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3182 cc->gdb_read_register = x86_cpu_gdb_read_register;
3183 cc->gdb_write_register = x86_cpu_gdb_write_register;
3184 cc->get_arch_id = x86_cpu_get_arch_id;
3185 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3186 #ifdef CONFIG_USER_ONLY
3187 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3188 #else
3189 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3190 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3191 cc->write_elf64_note = x86_cpu_write_elf64_note;
3192 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3193 cc->write_elf32_note = x86_cpu_write_elf32_note;
3194 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3195 cc->vmsd = &vmstate_x86_cpu;
3196 #endif
3197 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3198 #ifndef CONFIG_USER_ONLY
3199 cc->debug_excp_handler = breakpoint_handler;
3200 #endif
3201 cc->cpu_exec_enter = x86_cpu_exec_enter;
3202 cc->cpu_exec_exit = x86_cpu_exec_exit;
3205 static const TypeInfo x86_cpu_type_info = {
3206 .name = TYPE_X86_CPU,
3207 .parent = TYPE_CPU,
3208 .instance_size = sizeof(X86CPU),
3209 .instance_init = x86_cpu_initfn,
3210 .abstract = true,
3211 .class_size = sizeof(X86CPUClass),
3212 .class_init = x86_cpu_common_class_init,
3215 static void x86_cpu_register_types(void)
3217 int i;
3219 type_register_static(&x86_cpu_type_info);
3220 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3221 x86_register_cpudef_type(&builtin_x86_defs[i]);
3223 #ifdef CONFIG_KVM
3224 type_register_static(&host_x86_cpu_type_info);
3225 #endif
3228 type_init(x86_cpu_register_types)