qerror: Clean up QERR_ macros to expand into a single string
[qemu/kevin.git] / target-i386 / cpu.c
blob d4f4c8d687c8dfbbcdd636ee74d08a9b539beba9
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #include "hw/hw.h"
39 #if defined(CONFIG_KVM)
40 #include <linux/kvm_para.h>
41 #endif
43 #include "sysemu/sysemu.h"
44 #include "hw/qdev-properties.h"
45 #include "hw/cpu/icc_bus.h"
46 #ifndef CONFIG_USER_ONLY
47 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
53 /* Cache topology CPUID constants: */
55 /* CPUID Leaf 2 Descriptors */
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 /* CPUID Leaf 4 constants: */
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
69 #define CPUID_4_LEVEL(l) ((l) << 5)
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
79 #define ASSOC_FULL 0xFF
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
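/* Example: with the mapping above, AMD_ENC_ASSOC(8) evaluates to 0x6 and
 * AMD_ENC_ASSOC(ASSOC_FULL) to 0xF, while an unlisted value such as 3 falls
 * through to the trailing 0 ("invalid value"). */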
96 /* Definitions of the hardcoded cache entries we expose: */
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
140 /* TLB definitions: */
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
173 dst[CPUID_VENDOR_SZ] = '\0';
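/* Example: CPUID leaf 0 returns the vendor string in EBX/EDX/ECX as
 * little-endian 4-byte chunks, so
 *   x86_cpu_vendor_words2str(dst, 0x756e6547, 0x49656e69, 0x6c65746e);
 * yields dst = "GenuineIntel" (vendor1 = "Genu", vendor2 = "ineI",
 * vendor3 = "ntel"). */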
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
200 /* Feature names that are already defined in feature_name[] but are also set
201 * in CPUID[8000_0001].EDX on AMD CPUs do not have entries in
202 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203 * if and only if the CPU vendor is AMD.
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
263 NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
266 static const char *cpuid_apm_edx_feature_name[] = {
267 NULL, NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 "invtsc", NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
277 static const char *cpuid_xsave_feature_name[] = {
278 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
279 NULL, NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
288 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
289 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
290 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
291 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
292 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
293 CPUID_PSE36 | CPUID_FXSR)
294 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
295 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
296 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
297 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
298 CPUID_PAE | CPUID_SEP | CPUID_APIC)
300 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
301 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
302 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
303 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
304 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
305 /* partly implemented:
306 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
307 /* missing:
308 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
309 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
310 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
311 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
312 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
313 /* missing:
314 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
315 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
316 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
317 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
318 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
319 CPUID_EXT_RDRAND */
321 #ifdef TARGET_X86_64
322 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
323 #else
324 #define TCG_EXT2_X86_64_FEATURES 0
325 #endif
327 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
328 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
329 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
330 TCG_EXT2_X86_64_FEATURES)
331 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
332 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
333 #define TCG_EXT4_FEATURES 0
334 #define TCG_SVM_FEATURES 0
335 #define TCG_KVM_FEATURES 0
336 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
337 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
338 /* missing:
339 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
340 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
341 CPUID_7_0_EBX_RDSEED */
342 #define TCG_APM_FEATURES 0
345 typedef struct FeatureWordInfo {
346 const char **feat_names;
347 uint32_t cpuid_eax; /* Input EAX for CPUID */
348 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
349 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
350 int cpuid_reg; /* output register (R_* constant) */
351 uint32_t tcg_features; /* Feature flags supported by TCG */
352 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
353 } FeatureWordInfo;
355 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
356 [FEAT_1_EDX] = {
357 .feat_names = feature_name,
358 .cpuid_eax = 1, .cpuid_reg = R_EDX,
359 .tcg_features = TCG_FEATURES,
361 [FEAT_1_ECX] = {
362 .feat_names = ext_feature_name,
363 .cpuid_eax = 1, .cpuid_reg = R_ECX,
364 .tcg_features = TCG_EXT_FEATURES,
366 [FEAT_8000_0001_EDX] = {
367 .feat_names = ext2_feature_name,
368 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
369 .tcg_features = TCG_EXT2_FEATURES,
371 [FEAT_8000_0001_ECX] = {
372 .feat_names = ext3_feature_name,
373 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
374 .tcg_features = TCG_EXT3_FEATURES,
376 [FEAT_C000_0001_EDX] = {
377 .feat_names = ext4_feature_name,
378 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
379 .tcg_features = TCG_EXT4_FEATURES,
381 [FEAT_KVM] = {
382 .feat_names = kvm_feature_name,
383 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
384 .tcg_features = TCG_KVM_FEATURES,
386 [FEAT_SVM] = {
387 .feat_names = svm_feature_name,
388 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
389 .tcg_features = TCG_SVM_FEATURES,
391 [FEAT_7_0_EBX] = {
392 .feat_names = cpuid_7_0_ebx_feature_name,
393 .cpuid_eax = 7,
394 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
395 .cpuid_reg = R_EBX,
396 .tcg_features = TCG_7_0_EBX_FEATURES,
398 [FEAT_8000_0007_EDX] = {
399 .feat_names = cpuid_apm_edx_feature_name,
400 .cpuid_eax = 0x80000007,
401 .cpuid_reg = R_EDX,
402 .tcg_features = TCG_APM_FEATURES,
403 .unmigratable_flags = CPUID_APM_INVTSC,
405 [FEAT_XSAVE] = {
406 .feat_names = cpuid_xsave_feature_name,
407 .cpuid_eax = 0xd,
408 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
409 .cpuid_reg = R_EAX,
410 .tcg_features = 0,
414 typedef struct X86RegisterInfo32 {
415 /* Name of register */
416 const char *name;
417 /* QAPI enum value for the register */
418 X86CPURegister32 qapi_enum;
419 } X86RegisterInfo32;
421 #define REGISTER(reg) \
422 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
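/* Example: REGISTER(EAX) expands to
 *   [R_EAX] = { .name = "EAX", .qapi_enum = X86_CPU_REGISTER32_EAX }
 * so the table below maps each R_* index to a printable name plus the
 * matching QAPI enum value. */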
423 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
424 REGISTER(EAX),
425 REGISTER(ECX),
426 REGISTER(EDX),
427 REGISTER(EBX),
428 REGISTER(ESP),
429 REGISTER(EBP),
430 REGISTER(ESI),
431 REGISTER(EDI),
433 #undef REGISTER
435 typedef struct ExtSaveArea {
436 uint32_t feature, bits;
437 uint32_t offset, size;
438 } ExtSaveArea;
440 static const ExtSaveArea ext_save_areas[] = {
441 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
442 .offset = 0x240, .size = 0x100 },
443 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
444 .offset = 0x3c0, .size = 0x40 },
445 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
446 .offset = 0x400, .size = 0x40 },
447 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
448 .offset = 0x440, .size = 0x40 },
449 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
450 .offset = 0x480, .size = 0x200 },
451 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
452 .offset = 0x680, .size = 0x400 },
455 const char *get_register_name_32(unsigned int reg)
457 if (reg >= CPU_NB_REGS32) {
458 return NULL;
460 return x86_reg_info_32[reg].name;
463 /* KVM-specific features that are automatically added to all CPU models
464 * when KVM is enabled.
466 static uint32_t kvm_default_features[FEATURE_WORDS] = {
467 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
468 (1 << KVM_FEATURE_NOP_IO_DELAY) |
469 (1 << KVM_FEATURE_CLOCKSOURCE2) |
470 (1 << KVM_FEATURE_ASYNC_PF) |
471 (1 << KVM_FEATURE_STEAL_TIME) |
472 (1 << KVM_FEATURE_PV_EOI) |
473 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
474 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
477 /* Features that are not added by default to any CPU model when KVM is enabled.
479 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
480 [FEAT_1_EDX] = CPUID_ACPI,
481 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
482 [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
485 void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
487 kvm_default_features[w] &= ~features;
490 void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
492 kvm_default_unset_features[w] &= ~features;
496 * Returns the set of feature flags that are supported and migratable by
497 * QEMU, for a given FeatureWord.
499 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
501 FeatureWordInfo *wi = &feature_word_info[w];
502 uint32_t r = 0;
503 int i;
505 for (i = 0; i < 32; i++) {
506 uint32_t f = 1U << i;
507 /* If the feature name is unknown, it is not supported by QEMU yet */
508 if (!wi->feat_names[i]) {
509 continue;
511 /* Skip features known to QEMU, but explicitly marked as unmigratable */
512 if (wi->unmigratable_flags & f) {
513 continue;
515 r |= f;
517 return r;
520 void host_cpuid(uint32_t function, uint32_t count,
521 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
523 uint32_t vec[4];
525 #ifdef __x86_64__
526 asm volatile("cpuid"
527 : "=a"(vec[0]), "=b"(vec[1]),
528 "=c"(vec[2]), "=d"(vec[3])
529 : "0"(function), "c"(count) : "cc");
530 #elif defined(__i386__)
531 asm volatile("pusha \n\t"
532 "cpuid \n\t"
533 "mov %%eax, 0(%2) \n\t"
534 "mov %%ebx, 4(%2) \n\t"
535 "mov %%ecx, 8(%2) \n\t"
536 "mov %%edx, 12(%2) \n\t"
537 "popa"
538 : : "a"(function), "c"(count), "S"(vec)
539 : "memory", "cc");
540 #else
541 abort();
542 #endif
544 if (eax)
545 *eax = vec[0];
546 if (ebx)
547 *ebx = vec[1];
548 if (ecx)
549 *ecx = vec[2];
550 if (edx)
551 *edx = vec[3];
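/* Usage sketch: reading the host vendor string with the helper above
 * (output pointers may be passed as NULL when a register is not needed):
 *
 *   uint32_t ebx, ecx, edx;
 *   char vendor[CPUID_VENDOR_SZ + 1];
 *   host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
 *   x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
 *
 * This mirrors the pattern host_x86_cpu_class_init() uses further down. */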
554 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
556 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
557 * a substring. ex if !NULL points to the first char after a substring,
558 * otherwise the string is assumed to be sized by a terminating nul.
559 * Return lexical ordering of *s1:*s2.
561 static int sstrcmp(const char *s1, const char *e1,
562 const char *s2, const char *e2)
564 for (;;) {
565 if (!*s1 || !*s2 || *s1 != *s2)
566 return (*s1 - *s2);
567 ++s1, ++s2;
568 if (s1 == e1 && s2 == e2)
569 return (0);
570 else if (s1 == e1)
571 return (*s2);
572 else if (s2 == e2)
573 return (*s1);
577 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
578 * '|' delimited (possibly empty) strings in which case search for a match
579 * within the alternatives proceeds left to right. Return 0 for success,
580 * non-zero otherwise.
582 static int altcmp(const char *s, const char *e, const char *altstr)
584 const char *p, *q;
586 for (q = p = altstr; ; ) {
587 while (*p && *p != '|')
588 ++p;
589 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
590 return (0);
591 if (!*p)
592 return (1);
593 else
594 q = ++p;
598 /* search featureset for flag *[s..e), if found set corresponding bit in
599 * *pval and return true, otherwise return false
601 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
602 const char **featureset)
604 uint32_t mask;
605 const char **ppc;
606 bool found = false;
608 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
609 if (*ppc && !altcmp(s, e, *ppc)) {
610 *pval |= mask;
611 found = true;
614 return found;
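/* Example: lookup_feature(&val, "sse4_1", NULL, ext_feature_name) matches
 * the "sse4.1|sse4_1" alternative at bit 19 of ext_feature_name[] and ORs
 * (1U << 19) into *val; either spelling of the alias is accepted. */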
617 static void add_flagname_to_bitmaps(const char *flagname,
618 FeatureWordArray words,
619 Error **errp)
621 FeatureWord w;
622 for (w = 0; w < FEATURE_WORDS; w++) {
623 FeatureWordInfo *wi = &feature_word_info[w];
624 if (wi->feat_names &&
625 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
626 break;
629 if (w == FEATURE_WORDS) {
630 error_setg(errp, "CPU feature %s not found", flagname);
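/* Example: add_flagname_to_bitmaps("avx2", words, &err) scans each feature
 * word table in turn, finds "avx2" at bit 5 of cpuid_7_0_ebx_feature_name[]
 * and sets that bit in words[FEAT_7_0_EBX]; an unknown name such as "foo"
 * leaves words untouched and reports "CPU feature foo not found". */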
634 /* CPU class name definitions: */
636 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
637 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
639 /* Return type name for a given CPU model name
640 * Caller is responsible for freeing the returned string.
642 static char *x86_cpu_type_name(const char *model_name)
644 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
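/* Example (assuming a 64-bit target, where TYPE_X86_CPU is "x86_64-cpu"):
 * x86_cpu_type_name("qemu64") returns the heap-allocated string
 * "qemu64-x86_64-cpu", which x86_cpu_class_by_name() below resolves to the
 * corresponding QOM class. */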
647 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
649 ObjectClass *oc;
650 char *typename;
652 if (cpu_model == NULL) {
653 return NULL;
656 typename = x86_cpu_type_name(cpu_model);
657 oc = object_class_by_name(typename);
658 g_free(typename);
659 return oc;
662 struct X86CPUDefinition {
663 const char *name;
664 uint32_t level;
665 uint32_t xlevel;
666 uint32_t xlevel2;
667 /* vendor is zero-terminated, 12 character ASCII string */
668 char vendor[CPUID_VENDOR_SZ + 1];
669 int family;
670 int model;
671 int stepping;
672 FeatureWordArray features;
673 char model_id[48];
674 bool cache_info_passthrough;
677 static X86CPUDefinition builtin_x86_defs[] = {
679 .name = "qemu64",
680 .level = 4,
681 .vendor = CPUID_VENDOR_AMD,
682 .family = 6,
683 .model = 6,
684 .stepping = 3,
685 .features[FEAT_1_EDX] =
686 PPRO_FEATURES |
687 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
688 CPUID_PSE36,
689 .features[FEAT_1_ECX] =
690 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
691 .features[FEAT_8000_0001_EDX] =
692 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
693 .features[FEAT_8000_0001_ECX] =
694 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
695 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
696 .xlevel = 0x8000000A,
699 .name = "phenom",
700 .level = 5,
701 .vendor = CPUID_VENDOR_AMD,
702 .family = 16,
703 .model = 2,
704 .stepping = 3,
705 /* Missing: CPUID_HT */
706 .features[FEAT_1_EDX] =
707 PPRO_FEATURES |
708 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
709 CPUID_PSE36 | CPUID_VME,
710 .features[FEAT_1_ECX] =
711 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
712 CPUID_EXT_POPCNT,
713 .features[FEAT_8000_0001_EDX] =
714 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
715 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
716 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
717 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
718 CPUID_EXT3_CR8LEG,
719 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
720 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
721 .features[FEAT_8000_0001_ECX] =
722 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
723 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
724 /* Missing: CPUID_SVM_LBRV */
725 .features[FEAT_SVM] =
726 CPUID_SVM_NPT,
727 .xlevel = 0x8000001A,
728 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
731 .name = "core2duo",
732 .level = 10,
733 .vendor = CPUID_VENDOR_INTEL,
734 .family = 6,
735 .model = 15,
736 .stepping = 11,
737 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
738 .features[FEAT_1_EDX] =
739 PPRO_FEATURES |
740 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
741 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
742 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
743 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
744 .features[FEAT_1_ECX] =
745 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
746 CPUID_EXT_CX16,
747 .features[FEAT_8000_0001_EDX] =
748 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
749 .features[FEAT_8000_0001_ECX] =
750 CPUID_EXT3_LAHF_LM,
751 .xlevel = 0x80000008,
752 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
755 .name = "kvm64",
756 .level = 5,
757 .vendor = CPUID_VENDOR_INTEL,
758 .family = 15,
759 .model = 6,
760 .stepping = 1,
761 /* Missing: CPUID_HT */
762 .features[FEAT_1_EDX] =
763 PPRO_FEATURES | CPUID_VME |
764 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
765 CPUID_PSE36,
766 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
767 .features[FEAT_1_ECX] =
768 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
769 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
770 .features[FEAT_8000_0001_EDX] =
771 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
772 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
773 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
774 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
775 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
776 .features[FEAT_8000_0001_ECX] =
777 0,
778 .xlevel = 0x80000008,
779 .model_id = "Common KVM processor"
782 .name = "qemu32",
783 .level = 4,
784 .vendor = CPUID_VENDOR_INTEL,
785 .family = 6,
786 .model = 6,
787 .stepping = 3,
788 .features[FEAT_1_EDX] =
789 PPRO_FEATURES,
790 .features[FEAT_1_ECX] =
791 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
792 .xlevel = 0x80000004,
795 .name = "kvm32",
796 .level = 5,
797 .vendor = CPUID_VENDOR_INTEL,
798 .family = 15,
799 .model = 6,
800 .stepping = 1,
801 .features[FEAT_1_EDX] =
802 PPRO_FEATURES | CPUID_VME |
803 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
804 .features[FEAT_1_ECX] =
805 CPUID_EXT_SSE3,
806 .features[FEAT_8000_0001_ECX] =
807 0,
808 .xlevel = 0x80000008,
809 .model_id = "Common 32-bit KVM processor"
812 .name = "coreduo",
813 .level = 10,
814 .vendor = CPUID_VENDOR_INTEL,
815 .family = 6,
816 .model = 14,
817 .stepping = 8,
818 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
819 .features[FEAT_1_EDX] =
820 PPRO_FEATURES | CPUID_VME |
821 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
822 CPUID_SS,
823 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
824 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
825 .features[FEAT_1_ECX] =
826 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
827 .features[FEAT_8000_0001_EDX] =
828 CPUID_EXT2_NX,
829 .xlevel = 0x80000008,
830 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
833 .name = "486",
834 .level = 1,
835 .vendor = CPUID_VENDOR_INTEL,
836 .family = 4,
837 .model = 8,
838 .stepping = 0,
839 .features[FEAT_1_EDX] =
840 I486_FEATURES,
841 .xlevel = 0,
844 .name = "pentium",
845 .level = 1,
846 .vendor = CPUID_VENDOR_INTEL,
847 .family = 5,
848 .model = 4,
849 .stepping = 3,
850 .features[FEAT_1_EDX] =
851 PENTIUM_FEATURES,
852 .xlevel = 0,
855 .name = "pentium2",
856 .level = 2,
857 .vendor = CPUID_VENDOR_INTEL,
858 .family = 6,
859 .model = 5,
860 .stepping = 2,
861 .features[FEAT_1_EDX] =
862 PENTIUM2_FEATURES,
863 .xlevel = 0,
866 .name = "pentium3",
867 .level = 2,
868 .vendor = CPUID_VENDOR_INTEL,
869 .family = 6,
870 .model = 7,
871 .stepping = 3,
872 .features[FEAT_1_EDX] =
873 PENTIUM3_FEATURES,
874 .xlevel = 0,
877 .name = "athlon",
878 .level = 2,
879 .vendor = CPUID_VENDOR_AMD,
880 .family = 6,
881 .model = 2,
882 .stepping = 3,
883 .features[FEAT_1_EDX] =
884 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
885 CPUID_MCA,
886 .features[FEAT_8000_0001_EDX] =
887 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
888 .xlevel = 0x80000008,
891 .name = "n270",
892 /* original is on level 10 */
893 .level = 5,
894 .vendor = CPUID_VENDOR_INTEL,
895 .family = 6,
896 .model = 28,
897 .stepping = 2,
898 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
899 .features[FEAT_1_EDX] =
900 PPRO_FEATURES |
901 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
902 CPUID_ACPI | CPUID_SS,
903 /* Some CPUs have no CPUID_SEP */
904 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
905 * CPUID_EXT_XTPR */
906 .features[FEAT_1_ECX] =
907 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
908 CPUID_EXT_MOVBE,
909 .features[FEAT_8000_0001_EDX] =
910 CPUID_EXT2_NX,
911 .features[FEAT_8000_0001_ECX] =
912 CPUID_EXT3_LAHF_LM,
913 .xlevel = 0x8000000A,
914 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
917 .name = "Conroe",
918 .level = 4,
919 .vendor = CPUID_VENDOR_INTEL,
920 .family = 6,
921 .model = 15,
922 .stepping = 3,
923 .features[FEAT_1_EDX] =
924 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
925 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
926 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
927 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
928 CPUID_DE | CPUID_FP87,
929 .features[FEAT_1_ECX] =
930 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
931 .features[FEAT_8000_0001_EDX] =
932 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
933 .features[FEAT_8000_0001_ECX] =
934 CPUID_EXT3_LAHF_LM,
935 .xlevel = 0x8000000A,
936 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
939 .name = "Penryn",
940 .level = 4,
941 .vendor = CPUID_VENDOR_INTEL,
942 .family = 6,
943 .model = 23,
944 .stepping = 3,
945 .features[FEAT_1_EDX] =
946 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
947 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
948 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
949 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
950 CPUID_DE | CPUID_FP87,
951 .features[FEAT_1_ECX] =
952 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
953 CPUID_EXT_SSE3,
954 .features[FEAT_8000_0001_EDX] =
955 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
956 .features[FEAT_8000_0001_ECX] =
957 CPUID_EXT3_LAHF_LM,
958 .xlevel = 0x8000000A,
959 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
962 .name = "Nehalem",
963 .level = 4,
964 .vendor = CPUID_VENDOR_INTEL,
965 .family = 6,
966 .model = 26,
967 .stepping = 3,
968 .features[FEAT_1_EDX] =
969 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
970 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
971 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
972 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
973 CPUID_DE | CPUID_FP87,
974 .features[FEAT_1_ECX] =
975 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
976 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
977 .features[FEAT_8000_0001_EDX] =
978 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
979 .features[FEAT_8000_0001_ECX] =
980 CPUID_EXT3_LAHF_LM,
981 .xlevel = 0x8000000A,
982 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
985 .name = "Westmere",
986 .level = 11,
987 .vendor = CPUID_VENDOR_INTEL,
988 .family = 6,
989 .model = 44,
990 .stepping = 1,
991 .features[FEAT_1_EDX] =
992 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
993 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
994 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
995 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
996 CPUID_DE | CPUID_FP87,
997 .features[FEAT_1_ECX] =
998 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
999 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1000 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1001 .features[FEAT_8000_0001_EDX] =
1002 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1003 .features[FEAT_8000_0001_ECX] =
1004 CPUID_EXT3_LAHF_LM,
1005 .xlevel = 0x8000000A,
1006 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1009 .name = "SandyBridge",
1010 .level = 0xd,
1011 .vendor = CPUID_VENDOR_INTEL,
1012 .family = 6,
1013 .model = 42,
1014 .stepping = 1,
1015 .features[FEAT_1_EDX] =
1016 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1017 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1018 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1019 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1020 CPUID_DE | CPUID_FP87,
1021 .features[FEAT_1_ECX] =
1022 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1023 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1024 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1025 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1026 CPUID_EXT_SSE3,
1027 .features[FEAT_8000_0001_EDX] =
1028 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1029 CPUID_EXT2_SYSCALL,
1030 .features[FEAT_8000_0001_ECX] =
1031 CPUID_EXT3_LAHF_LM,
1032 .features[FEAT_XSAVE] =
1033 CPUID_XSAVE_XSAVEOPT,
1034 .xlevel = 0x8000000A,
1035 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1038 .name = "IvyBridge",
1039 .level = 0xd,
1040 .vendor = CPUID_VENDOR_INTEL,
1041 .family = 6,
1042 .model = 58,
1043 .stepping = 9,
1044 .features[FEAT_1_EDX] =
1045 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1046 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1047 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1048 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1049 CPUID_DE | CPUID_FP87,
1050 .features[FEAT_1_ECX] =
1051 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1052 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1053 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1054 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1055 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1056 .features[FEAT_7_0_EBX] =
1057 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1058 CPUID_7_0_EBX_ERMS,
1059 .features[FEAT_8000_0001_EDX] =
1060 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1061 CPUID_EXT2_SYSCALL,
1062 .features[FEAT_8000_0001_ECX] =
1063 CPUID_EXT3_LAHF_LM,
1064 .features[FEAT_XSAVE] =
1065 CPUID_XSAVE_XSAVEOPT,
1066 .xlevel = 0x8000000A,
1067 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1070 .name = "Haswell-noTSX",
1071 .level = 0xd,
1072 .vendor = CPUID_VENDOR_INTEL,
1073 .family = 6,
1074 .model = 60,
1075 .stepping = 1,
1076 .features[FEAT_1_EDX] =
1077 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1078 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1079 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1080 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1081 CPUID_DE | CPUID_FP87,
1082 .features[FEAT_1_ECX] =
1083 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1084 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1085 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1086 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1087 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1088 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1089 .features[FEAT_8000_0001_EDX] =
1090 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1091 CPUID_EXT2_SYSCALL,
1092 .features[FEAT_8000_0001_ECX] =
1093 CPUID_EXT3_LAHF_LM,
1094 .features[FEAT_7_0_EBX] =
1095 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1096 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1097 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1098 .features[FEAT_XSAVE] =
1099 CPUID_XSAVE_XSAVEOPT,
1100 .xlevel = 0x8000000A,
1101 .model_id = "Intel Core Processor (Haswell, no TSX)",
1102 }, {
1103 .name = "Haswell",
1104 .level = 0xd,
1105 .vendor = CPUID_VENDOR_INTEL,
1106 .family = 6,
1107 .model = 60,
1108 .stepping = 1,
1109 .features[FEAT_1_EDX] =
1110 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1111 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1112 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1113 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1114 CPUID_DE | CPUID_FP87,
1115 .features[FEAT_1_ECX] =
1116 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1117 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1118 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1119 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1120 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1121 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1122 .features[FEAT_8000_0001_EDX] =
1123 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1124 CPUID_EXT2_SYSCALL,
1125 .features[FEAT_8000_0001_ECX] =
1126 CPUID_EXT3_LAHF_LM,
1127 .features[FEAT_7_0_EBX] =
1128 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1129 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1130 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1131 CPUID_7_0_EBX_RTM,
1132 .features[FEAT_XSAVE] =
1133 CPUID_XSAVE_XSAVEOPT,
1134 .xlevel = 0x8000000A,
1135 .model_id = "Intel Core Processor (Haswell)",
1138 .name = "Broadwell-noTSX",
1139 .level = 0xd,
1140 .vendor = CPUID_VENDOR_INTEL,
1141 .family = 6,
1142 .model = 61,
1143 .stepping = 2,
1144 .features[FEAT_1_EDX] =
1145 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1146 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1147 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1148 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1149 CPUID_DE | CPUID_FP87,
1150 .features[FEAT_1_ECX] =
1151 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1152 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1153 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1154 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1155 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1156 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1157 .features[FEAT_8000_0001_EDX] =
1158 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1159 CPUID_EXT2_SYSCALL,
1160 .features[FEAT_8000_0001_ECX] =
1161 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1162 .features[FEAT_7_0_EBX] =
1163 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1164 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1165 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1166 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1167 CPUID_7_0_EBX_SMAP,
1168 .features[FEAT_XSAVE] =
1169 CPUID_XSAVE_XSAVEOPT,
1170 .xlevel = 0x8000000A,
1171 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1174 .name = "Broadwell",
1175 .level = 0xd,
1176 .vendor = CPUID_VENDOR_INTEL,
1177 .family = 6,
1178 .model = 61,
1179 .stepping = 2,
1180 .features[FEAT_1_EDX] =
1181 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1182 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1183 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1184 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1185 CPUID_DE | CPUID_FP87,
1186 .features[FEAT_1_ECX] =
1187 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1188 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1189 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1190 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1191 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1192 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1193 .features[FEAT_8000_0001_EDX] =
1194 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1195 CPUID_EXT2_SYSCALL,
1196 .features[FEAT_8000_0001_ECX] =
1197 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1198 .features[FEAT_7_0_EBX] =
1199 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1200 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1201 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1202 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1203 CPUID_7_0_EBX_SMAP,
1204 .features[FEAT_XSAVE] =
1205 CPUID_XSAVE_XSAVEOPT,
1206 .xlevel = 0x8000000A,
1207 .model_id = "Intel Core Processor (Broadwell)",
1210 .name = "Opteron_G1",
1211 .level = 5,
1212 .vendor = CPUID_VENDOR_AMD,
1213 .family = 15,
1214 .model = 6,
1215 .stepping = 1,
1216 .features[FEAT_1_EDX] =
1217 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1218 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1219 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1220 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1221 CPUID_DE | CPUID_FP87,
1222 .features[FEAT_1_ECX] =
1223 CPUID_EXT_SSE3,
1224 .features[FEAT_8000_0001_EDX] =
1225 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1226 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1227 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1228 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1229 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1230 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1231 .xlevel = 0x80000008,
1232 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1235 .name = "Opteron_G2",
1236 .level = 5,
1237 .vendor = CPUID_VENDOR_AMD,
1238 .family = 15,
1239 .model = 6,
1240 .stepping = 1,
1241 .features[FEAT_1_EDX] =
1242 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1243 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1244 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1245 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1246 CPUID_DE | CPUID_FP87,
1247 .features[FEAT_1_ECX] =
1248 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1249 .features[FEAT_8000_0001_EDX] =
1250 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1251 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1252 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1253 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1254 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1255 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1256 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1257 .features[FEAT_8000_0001_ECX] =
1258 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1259 .xlevel = 0x80000008,
1260 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1263 .name = "Opteron_G3",
1264 .level = 5,
1265 .vendor = CPUID_VENDOR_AMD,
1266 .family = 15,
1267 .model = 6,
1268 .stepping = 1,
1269 .features[FEAT_1_EDX] =
1270 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1271 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1272 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1273 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1274 CPUID_DE | CPUID_FP87,
1275 .features[FEAT_1_ECX] =
1276 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1277 CPUID_EXT_SSE3,
1278 .features[FEAT_8000_0001_EDX] =
1279 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1280 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1281 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1282 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1283 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1284 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1285 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1286 .features[FEAT_8000_0001_ECX] =
1287 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1288 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1289 .xlevel = 0x80000008,
1290 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1293 .name = "Opteron_G4",
1294 .level = 0xd,
1295 .vendor = CPUID_VENDOR_AMD,
1296 .family = 21,
1297 .model = 1,
1298 .stepping = 2,
1299 .features[FEAT_1_EDX] =
1300 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1301 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1302 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1303 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1304 CPUID_DE | CPUID_FP87,
1305 .features[FEAT_1_ECX] =
1306 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1307 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1308 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1309 CPUID_EXT_SSE3,
1310 .features[FEAT_8000_0001_EDX] =
1311 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1312 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1313 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1314 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1315 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1316 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1317 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1318 .features[FEAT_8000_0001_ECX] =
1319 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1320 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1321 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1322 CPUID_EXT3_LAHF_LM,
1323 /* no xsaveopt! */
1324 .xlevel = 0x8000001A,
1325 .model_id = "AMD Opteron 62xx class CPU",
1328 .name = "Opteron_G5",
1329 .level = 0xd,
1330 .vendor = CPUID_VENDOR_AMD,
1331 .family = 21,
1332 .model = 2,
1333 .stepping = 0,
1334 .features[FEAT_1_EDX] =
1335 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1336 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1337 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1338 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1339 CPUID_DE | CPUID_FP87,
1340 .features[FEAT_1_ECX] =
1341 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1342 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1343 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1344 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1345 .features[FEAT_8000_0001_EDX] =
1346 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1347 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1348 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1349 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1350 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1351 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1352 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1353 .features[FEAT_8000_0001_ECX] =
1354 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1355 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1356 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1357 CPUID_EXT3_LAHF_LM,
1358 /* no xsaveopt! */
1359 .xlevel = 0x8000001A,
1360 .model_id = "AMD Opteron 63xx class CPU",
1365 * x86_cpu_compat_set_features:
1366 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1367 * @w: Identifies the feature word to be changed.
1368 * @feat_add: Feature bits to be added to feature word
1369 * @feat_remove: Feature bits to be removed from feature word
1371 * Change CPU model feature bits for compatibility.
1373 * This function may be used by machine-type compatibility functions
1374 * to enable or disable feature bits on specific CPU models.
1376 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1377 uint32_t feat_add, uint32_t feat_remove)
1379 X86CPUDefinition *def;
1380 int i;
1381 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1382 def = &builtin_x86_defs[i];
1383 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1384 def->features[w] |= feat_add;
1385 def->features[w] &= ~feat_remove;
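/* Usage sketch (hypothetical call, for illustration only): a machine-type
 * compat hook could mask a flag out of every built-in model with
 *   x86_cpu_compat_set_features(NULL, FEAT_1_ECX, 0, CPUID_EXT_X2APIC);
 * or add a flag to a single model by passing its name instead of NULL. */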
1390 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1391 bool migratable_only);
1393 #ifdef CONFIG_KVM
1395 static int cpu_x86_fill_model_id(char *str)
1397 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1398 int i;
1400 for (i = 0; i < 3; i++) {
1401 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1402 memcpy(str + i * 16 + 0, &eax, 4);
1403 memcpy(str + i * 16 + 4, &ebx, 4);
1404 memcpy(str + i * 16 + 8, &ecx, 4);
1405 memcpy(str + i * 16 + 12, &edx, 4);
1407 return 0;
1410 static X86CPUDefinition host_cpudef;
1412 static Property host_x86_cpu_properties[] = {
1413 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1414 DEFINE_PROP_END_OF_LIST()
1417 /* class_init for the "host" CPU model
1419 * This function may be called before KVM is initialized.
1421 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1423 DeviceClass *dc = DEVICE_CLASS(oc);
1424 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1425 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1427 xcc->kvm_required = true;
1429 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1430 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1432 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1433 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1434 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1435 host_cpudef.stepping = eax & 0x0F;
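/* Worked example: CPUID.01H:EAX = 0x000306c3 decodes via the expressions
 * above to family 6 + 0 = 6, model 0xc | 0x30 = 0x3c (60) and stepping 3,
 * i.e. the extended family/model fields are folded into the base ones. */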
1437 cpu_x86_fill_model_id(host_cpudef.model_id);
1439 xcc->cpu_def = &host_cpudef;
1440 host_cpudef.cache_info_passthrough = true;
1442 /* level, xlevel, xlevel2, and the feature words are initialized on
1443 * instance_init, because they require KVM to be initialized.
1446 dc->props = host_x86_cpu_properties;
1449 static void host_x86_cpu_initfn(Object *obj)
1451 X86CPU *cpu = X86_CPU(obj);
1452 CPUX86State *env = &cpu->env;
1453 KVMState *s = kvm_state;
1455 assert(kvm_enabled());
1457 /* We can't fill the features array here because we don't know yet if
1458 * "migratable" is true or false.
1460 cpu->host_features = true;
1462 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1463 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1464 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1466 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1469 static const TypeInfo host_x86_cpu_type_info = {
1470 .name = X86_CPU_TYPE_NAME("host"),
1471 .parent = TYPE_X86_CPU,
1472 .instance_init = host_x86_cpu_initfn,
1473 .class_init = host_x86_cpu_class_init,
1476 #endif
1478 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1480 FeatureWordInfo *f = &feature_word_info[w];
1481 int i;
1483 for (i = 0; i < 32; ++i) {
1484 if (1 << i & mask) {
1485 const char *reg = get_register_name_32(f->cpuid_reg);
1486 assert(reg);
1487 fprintf(stderr, "warning: %s doesn't support requested feature: "
1488 "CPUID.%02XH:%s%s%s [bit %d]\n",
1489 kvm_enabled() ? "host" : "TCG",
1490 f->cpuid_eax, reg,
1491 f->feat_names[i] ? "." : "",
1492 f->feat_names[i] ? f->feat_names[i] : "", i);
1497 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1498 const char *name, Error **errp)
1500 X86CPU *cpu = X86_CPU(obj);
1501 CPUX86State *env = &cpu->env;
1502 int64_t value;
1504 value = (env->cpuid_version >> 8) & 0xf;
1505 if (value == 0xf) {
1506 value += (env->cpuid_version >> 20) & 0xff;
1508 visit_type_int(v, &value, name, errp);
1511 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1512 const char *name, Error **errp)
1514 X86CPU *cpu = X86_CPU(obj);
1515 CPUX86State *env = &cpu->env;
1516 const int64_t min = 0;
1517 const int64_t max = 0xff + 0xf;
1518 Error *local_err = NULL;
1519 int64_t value;
1521 visit_type_int(v, &value, name, &local_err);
1522 if (local_err) {
1523 error_propagate(errp, local_err);
1524 return;
1526 if (value < min || value > max) {
1527 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1528 name ? name : "null", value, min, max);
1529 return;
1532 env->cpuid_version &= ~0xff00f00;
1533 if (value > 0x0f) {
1534 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1535 } else {
1536 env->cpuid_version |= value << 8;
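/* Example: setting "family" to 21 (0x15) stores base family 0xf plus an
 * extended family of 21 - 15 = 6 in bits 20-27, while a value such as 6 fits
 * entirely in bits 8-11; the getter above reverses this by summing the two
 * fields. */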
1540 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1541 const char *name, Error **errp)
1543 X86CPU *cpu = X86_CPU(obj);
1544 CPUX86State *env = &cpu->env;
1545 int64_t value;
1547 value = (env->cpuid_version >> 4) & 0xf;
1548 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1549 visit_type_int(v, &value, name, errp);
1552 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1553 const char *name, Error **errp)
1555 X86CPU *cpu = X86_CPU(obj);
1556 CPUX86State *env = &cpu->env;
1557 const int64_t min = 0;
1558 const int64_t max = 0xff;
1559 Error *local_err = NULL;
1560 int64_t value;
1562 visit_type_int(v, &value, name, &local_err);
1563 if (local_err) {
1564 error_propagate(errp, local_err);
1565 return;
1567 if (value < min || value > max) {
1568 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1569 name ? name : "null", value, min, max);
1570 return;
1573 env->cpuid_version &= ~0xf00f0;
1574 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1577 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1578 void *opaque, const char *name,
1579 Error **errp)
1581 X86CPU *cpu = X86_CPU(obj);
1582 CPUX86State *env = &cpu->env;
1583 int64_t value;
1585 value = env->cpuid_version & 0xf;
1586 visit_type_int(v, &value, name, errp);
1589 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1590 void *opaque, const char *name,
1591 Error **errp)
1593 X86CPU *cpu = X86_CPU(obj);
1594 CPUX86State *env = &cpu->env;
1595 const int64_t min = 0;
1596 const int64_t max = 0xf;
1597 Error *local_err = NULL;
1598 int64_t value;
1600 visit_type_int(v, &value, name, &local_err);
1601 if (local_err) {
1602 error_propagate(errp, local_err);
1603 return;
1605 if (value < min || value > max) {
1606 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1607 name ? name : "null", value, min, max);
1608 return;
1611 env->cpuid_version &= ~0xf;
1612 env->cpuid_version |= value & 0xf;
1615 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1617 X86CPU *cpu = X86_CPU(obj);
1618 CPUX86State *env = &cpu->env;
1619 char *value;
1621 value = g_malloc(CPUID_VENDOR_SZ + 1);
1622 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1623 env->cpuid_vendor3);
1624 return value;
1627 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1628 Error **errp)
1630 X86CPU *cpu = X86_CPU(obj);
1631 CPUX86State *env = &cpu->env;
1632 int i;
1634 if (strlen(value) != CPUID_VENDOR_SZ) {
1635 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1636 return;
1639 env->cpuid_vendor1 = 0;
1640 env->cpuid_vendor2 = 0;
1641 env->cpuid_vendor3 = 0;
1642 for (i = 0; i < 4; i++) {
1643 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1644 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1645 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1649 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1651 X86CPU *cpu = X86_CPU(obj);
1652 CPUX86State *env = &cpu->env;
1653 char *value;
1654 int i;
1656 value = g_malloc(48 + 1);
1657 for (i = 0; i < 48; i++) {
1658 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1660 value[48] = '\0';
1661 return value;
1664 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1665 Error **errp)
1667 X86CPU *cpu = X86_CPU(obj);
1668 CPUX86State *env = &cpu->env;
1669 int c, len, i;
1671 if (model_id == NULL) {
1672 model_id = "";
1674 len = strlen(model_id);
1675 memset(env->cpuid_model, 0, 48);
1676 for (i = 0; i < 48; i++) {
1677 if (i >= len) {
1678 c = '\0';
1679 } else {
1680 c = (uint8_t)model_id[i];
1682 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1686 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1687 const char *name, Error **errp)
1689 X86CPU *cpu = X86_CPU(obj);
1690 int64_t value;
1692 value = cpu->env.tsc_khz * 1000;
1693 visit_type_int(v, &value, name, errp);
1696 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1697 const char *name, Error **errp)
1699 X86CPU *cpu = X86_CPU(obj);
1700 const int64_t min = 0;
1701 const int64_t max = INT64_MAX;
1702 Error *local_err = NULL;
1703 int64_t value;
1705 visit_type_int(v, &value, name, &local_err);
1706 if (local_err) {
1707 error_propagate(errp, local_err);
1708 return;
1710 if (value < min || value > max) {
1711 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1712 name ? name : "null", value, min, max);
1713 return;
1716 cpu->env.tsc_khz = value / 1000;
1719 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1720 const char *name, Error **errp)
1722 X86CPU *cpu = X86_CPU(obj);
1723 int64_t value = cpu->apic_id;
1725 visit_type_int(v, &value, name, errp);
1728 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1729 const char *name, Error **errp)
1731 X86CPU *cpu = X86_CPU(obj);
1732 DeviceState *dev = DEVICE(obj);
1733 const int64_t min = 0;
1734 const int64_t max = UINT32_MAX;
1735 Error *error = NULL;
1736 int64_t value;
1738 if (dev->realized) {
1739 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1740 "it was realized", name, object_get_typename(obj));
1741 return;
1744 visit_type_int(v, &value, name, &error);
1745 if (error) {
1746 error_propagate(errp, error);
1747 return;
1749 if (value < min || value > max) {
1750 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1751 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1752 object_get_typename(obj), name, value, min, max);
1753 return;
1756 if ((value != cpu->apic_id) && cpu_exists(value)) {
1757 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1758 return;
1760 cpu->apic_id = value;
1763 /* Generic getter for "feature-words" and "filtered-features" properties */
1764 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1765 const char *name, Error **errp)
1767 uint32_t *array = (uint32_t *)opaque;
1768 FeatureWord w;
1769 Error *err = NULL;
1770 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1771 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1772 X86CPUFeatureWordInfoList *list = NULL;
1774 for (w = 0; w < FEATURE_WORDS; w++) {
1775 FeatureWordInfo *wi = &feature_word_info[w];
1776 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1777 qwi->cpuid_input_eax = wi->cpuid_eax;
1778 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1779 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1780 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1781 qwi->features = array[w];
1783 /* List will be in reverse order, but order shouldn't matter */
1784 list_entries[w].next = list;
1785 list_entries[w].value = &word_infos[w];
1786 list = &list_entries[w];
1789 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1790 error_propagate(errp, err);
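/* Accessors for the "hv-spinlocks" property (Hyper-V spinlock retry count);
 * the setter rejects values outside [0xFFF, UINT_MAX].
 */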
1793 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1794 const char *name, Error **errp)
1796 X86CPU *cpu = X86_CPU(obj);
1797 int64_t value = cpu->hyperv_spinlock_attempts;
1799 visit_type_int(v, &value, name, errp);
1802 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1803 const char *name, Error **errp)
1805 const int64_t min = 0xFFF;
1806 const int64_t max = UINT_MAX;
1807 X86CPU *cpu = X86_CPU(obj);
1808 Error *err = NULL;
1809 int64_t value;
1811 visit_type_int(v, &value, name, &err);
1812 if (err) {
1813 error_propagate(errp, err);
1814 return;
1817 if (value < min || value > max) {
1818 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1819 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1820 object_get_typename(obj), name ? name : "null",
1821 value, min, max);
1822 return;
1824 cpu->hyperv_spinlock_attempts = value;
1827 static PropertyInfo qdev_prop_spinlocks = {
1828 .name = "int",
1829 .get = x86_get_hv_spinlocks,
1830 .set = x86_set_hv_spinlocks,
1833 /* Convert all '_' in a feature string option name to '-', to make the feature
1834 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
1836 static inline void feat2prop(char *s)
1838 while ((s = strchr(s, '_'))) {
1839 *s = '-';
1843 /* Parse "+feature,-feature,feature=foo" CPU feature string
1845 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1846 Error **errp)
1848 X86CPU *cpu = X86_CPU(cs);
1849 char *featurestr; /* Single "key=value" string being parsed */
1850 FeatureWord w;
1851 /* Features to be added */
1852 FeatureWordArray plus_features = { 0 };
1853 /* Features to be removed */
1854 FeatureWordArray minus_features = { 0 };
1855 uint32_t numvalue;
1856 CPUX86State *env = &cpu->env;
1857 Error *local_err = NULL;
1859 featurestr = features ? strtok(features, ",") : NULL;
1861 while (featurestr) {
1862 char *val;
1863 if (featurestr[0] == '+') {
1864 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1865 } else if (featurestr[0] == '-') {
1866 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1867 } else if ((val = strchr(featurestr, '='))) {
1868 *val = 0; val++;
1869 feat2prop(featurestr);
1870 if (!strcmp(featurestr, "xlevel")) {
1871 char *err;
1872 char num[32];
1874 numvalue = strtoul(val, &err, 0);
1875 if (!*val || *err) {
1876 error_setg(errp, "bad numerical value %s", val);
1877 return;
1879 if (numvalue < 0x80000000) {
1880 error_report("xlevel value shall always be >= 0x80000000"
1881 ", fixup will be removed in future versions");
1882 numvalue += 0x80000000;
1884 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1885 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1886 } else if (!strcmp(featurestr, "tsc-freq")) {
1887 int64_t tsc_freq;
1888 char *err;
1889 char num[32];
1891 tsc_freq = strtosz_suffix_unit(val, &err,
1892 STRTOSZ_DEFSUFFIX_B, 1000);
1893 if (tsc_freq < 0 || *err) {
1894 error_setg(errp, "bad numerical value %s", val);
1895 return;
1897 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1898 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1899 &local_err);
1900 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1901 char *err;
1902 const int min = 0xFFF;
1903 char num[32];
1904 numvalue = strtoul(val, &err, 0);
1905 if (!*val || *err) {
1906 error_setg(errp, "bad numerical value %s", val);
1907 return;
1909 if (numvalue < min) {
1910 error_report("hv-spinlocks value shall always be >= 0x%x"
1911 ", fixup will be removed in future versions",
1912 min);
1913 numvalue = min;
1915 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1916 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1917 } else {
1918 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1920 } else {
1921 feat2prop(featurestr);
1922 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1924 if (local_err) {
1925 error_propagate(errp, local_err);
1926 return;
1928 featurestr = strtok(NULL, ",");
1931 if (cpu->host_features) {
1932 for (w = 0; w < FEATURE_WORDS; w++) {
1933 env->features[w] =
1934 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1938 for (w = 0; w < FEATURE_WORDS; w++) {
1939 env->features[w] |= plus_features[w];
1940 env->features[w] &= ~minus_features[w];
1944 /* Print all cpuid feature names in featureset
1946 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1948 int bit;
1949 bool first = true;
1951 for (bit = 0; bit < 32; bit++) {
1952 if (featureset[bit]) {
1953 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1954 first = false;
1959 /* generate CPU information. */
1960 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1962 X86CPUDefinition *def;
1963 char buf[256];
1964 int i;
1966 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1967 def = &builtin_x86_defs[i];
1968 snprintf(buf, sizeof(buf), "%s", def->name);
1969 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1971 #ifdef CONFIG_KVM
1972 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1973 "KVM processor with all supported host features "
1974 "(only available in KVM mode)");
1975 #endif
1977 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1978 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1979 FeatureWordInfo *fw = &feature_word_info[i];
1981 (*cpu_fprintf)(f, " ");
1982 listflags(f, cpu_fprintf, fw->feat_names);
1983 (*cpu_fprintf)(f, "\n");
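/* Build the list of built-in CPU model definitions (backs the
 * query-cpu-definitions QMP command).
 */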
1987 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1989 CpuDefinitionInfoList *cpu_list = NULL;
1990 X86CPUDefinition *def;
1991 int i;
1993 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1994 CpuDefinitionInfoList *entry;
1995 CpuDefinitionInfo *info;
1997 def = &builtin_x86_defs[i];
1998 info = g_malloc0(sizeof(*info));
1999 info->name = g_strdup(def->name);
2001 entry = g_malloc0(sizeof(*entry));
2002 entry->value = info;
2003 entry->next = cpu_list;
2004 cpu_list = entry;
2007 return cpu_list;
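/* Return the bits of feature word w that the current accelerator can provide:
 * the KVM-reported CPUID bits under KVM, the TCG feature mask under TCG, and
 * all bits otherwise.  When migratable_only is set, the KVM/TCG result is
 * further restricted to migratable flags.
 */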
2010 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2011 bool migratable_only)
2013 FeatureWordInfo *wi = &feature_word_info[w];
2014 uint32_t r;
2016 if (kvm_enabled()) {
2017 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2018 wi->cpuid_ecx,
2019 wi->cpuid_reg);
2020 } else if (tcg_enabled()) {
2021 r = wi->tcg_features;
2022 } else {
2023 return ~0;
2025 if (migratable_only) {
2026 r &= x86_cpu_get_migratable_flags(w);
2028 return r;
2032 * Filters CPU feature words based on host availability of each feature.
2034 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2036 static int x86_cpu_filter_features(X86CPU *cpu)
2038 CPUX86State *env = &cpu->env;
2039 FeatureWord w;
2040 int rv = 0;
2042 for (w = 0; w < FEATURE_WORDS; w++) {
2043 uint32_t host_feat =
2044 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2045 uint32_t requested_features = env->features[w];
2046 env->features[w] &= host_feat;
2047 cpu->filtered_features[w] = requested_features & ~env->features[w];
2048 if (cpu->filtered_features[w]) {
2049 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2050 report_unavailable_features(w, cpu->filtered_features[w]);
2052 rv = 1;
2056 return rv;
2059 /* Load data from X86CPUDefinition
2061 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2063 CPUX86State *env = &cpu->env;
2064 const char *vendor;
2065 char host_vendor[CPUID_VENDOR_SZ + 1];
2066 FeatureWord w;
2068 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2069 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2070 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2071 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2072 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2073 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2074 cpu->cache_info_passthrough = def->cache_info_passthrough;
2075 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2076 for (w = 0; w < FEATURE_WORDS; w++) {
2077 env->features[w] = def->features[w];
2080 /* Special cases not set in the X86CPUDefinition structs: */
2081 if (kvm_enabled()) {
2082 FeatureWord w;
2083 for (w = 0; w < FEATURE_WORDS; w++) {
2084 env->features[w] |= kvm_default_features[w];
2085 env->features[w] &= ~kvm_default_unset_features[w];
2089 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2091 /* sysenter isn't supported in compatibility mode on AMD,
2092 * syscall isn't supported in compatibility mode on Intel.
2093 * Normally we advertise the actual CPU vendor, but you can
2094 * override this using the 'vendor' property if you want to use
2095 * KVM's sysenter/syscall emulation in compatibility mode and
2096 * when doing cross vendor migration
2098 vendor = def->vendor;
2099 if (kvm_enabled()) {
2100 uint32_t ebx = 0, ecx = 0, edx = 0;
2101 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2102 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2103 vendor = host_vendor;
2106 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
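/* Create (but do not realize) an X86CPU from a "model[,+feat,-feat,prop=val]"
 * string, e.g. "qemu64,+avx,xlevel=0x8000000A" (illustrative).  The caller is
 * responsible for setting the "realized" property, as cpu_x86_init() does.
 */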
2110 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2112 X86CPU *cpu = NULL;
2113 X86CPUClass *xcc;
2114 ObjectClass *oc;
2115 gchar **model_pieces;
2116 char *name, *features;
2117 Error *error = NULL;
2119 model_pieces = g_strsplit(cpu_model, ",", 2);
2120 if (!model_pieces[0]) {
2121 error_setg(&error, "Invalid/empty CPU model name");
2122 goto out;
2124 name = model_pieces[0];
2125 features = model_pieces[1];
2127 oc = x86_cpu_class_by_name(name);
2128 if (oc == NULL) {
2129 error_setg(&error, "Unable to find CPU definition: %s", name);
2130 goto out;
2132 xcc = X86_CPU_CLASS(oc);
2134 if (xcc->kvm_required && !kvm_enabled()) {
2135 error_setg(&error, "CPU model '%s' requires KVM", name);
2136 goto out;
2139 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2141 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2142 if (error) {
2143 goto out;
2146 out:
2147 if (error != NULL) {
2148 error_propagate(errp, error);
2149 if (cpu) {
2150 object_unref(OBJECT(cpu));
2151 cpu = NULL;
2154 g_strfreev(model_pieces);
2155 return cpu;
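/* Convenience wrapper: create an X86CPU from cpu_model, realize it, and
 * return it; reports the error and returns NULL on failure.
 */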
2158 X86CPU *cpu_x86_init(const char *cpu_model)
2160 Error *error = NULL;
2161 X86CPU *cpu;
2163 cpu = cpu_x86_create(cpu_model, &error);
2164 if (error) {
2165 goto out;
2168 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2170 out:
2171 if (error) {
2172 error_report_err(error);
2173 if (cpu != NULL) {
2174 object_unref(OBJECT(cpu));
2175 cpu = NULL;
2178 return cpu;
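/* Class init for the per-model CPU types registered by
 * x86_register_cpudef_type(): stash the X86CPUDefinition in the class so
 * x86_cpu_initfn() can load it into each new instance.
 */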
2181 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2183 X86CPUDefinition *cpudef = data;
2184 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2186 xcc->cpu_def = cpudef;
2189 static void x86_register_cpudef_type(X86CPUDefinition *def)
2191 char *typename = x86_cpu_type_name(def->name);
2192 TypeInfo ti = {
2193 .name = typename,
2194 .parent = TYPE_X86_CPU,
2195 .class_init = x86_cpu_cpudef_class_init,
2196 .class_data = def,
2199 type_register(&ti);
2200 g_free(typename);
2203 #if !defined(CONFIG_USER_ONLY)
2205 void cpu_clear_apic_feature(CPUX86State *env)
2207 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2210 #endif /* !CONFIG_USER_ONLY */
2212 /* Initialize list of CPU models, filling some non-static fields if necessary
2214 void x86_cpudef_setup(void)
2216 int i, j;
2217 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2219 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2220 X86CPUDefinition *def = &builtin_x86_defs[i];
2222 /* Look for specific "cpudef" models that
2223 * have the QEMU version in .model_id */
2224 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2225 if (strcmp(model_with_versions[j], def->name) == 0) {
2226 pstrcpy(def->model_id, sizeof(def->model_id),
2227 "QEMU Virtual CPU version ");
2228 pstrcat(def->model_id, sizeof(def->model_id),
2229 qemu_get_version());
2230 break;
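/* CPUID emulation: fill EAX/EBX/ECX/EDX for the requested leaf (index) and
 * sub-leaf (count) from the configured CPU state; out-of-range leaves are
 * redirected per the Intel/Centaur rules handled below.
 */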
2236 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2237 uint32_t *eax, uint32_t *ebx,
2238 uint32_t *ecx, uint32_t *edx)
2240 X86CPU *cpu = x86_env_get_cpu(env);
2241 CPUState *cs = CPU(cpu);
2243 /* test if maximum index reached */
2244 if (index & 0x80000000) {
2245 if (index > env->cpuid_xlevel) {
2246 if (env->cpuid_xlevel2 > 0) {
2247 /* Handle the Centaur's CPUID instruction. */
2248 if (index > env->cpuid_xlevel2) {
2249 index = env->cpuid_xlevel2;
2250 } else if (index < 0xC0000000) {
2251 index = env->cpuid_xlevel;
2253 } else {
2254 /* Intel documentation states that invalid EAX input will
2255 * return the same information as EAX=cpuid_level
2256 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2258 index = env->cpuid_level;
2261 } else {
2262 if (index > env->cpuid_level)
2263 index = env->cpuid_level;
2266 switch(index) {
2267 case 0:
2268 *eax = env->cpuid_level;
2269 *ebx = env->cpuid_vendor1;
2270 *edx = env->cpuid_vendor2;
2271 *ecx = env->cpuid_vendor3;
2272 break;
2273 case 1:
2274 *eax = env->cpuid_version;
2275 *ebx = (cpu->apic_id << 24) |
2276 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2277 *ecx = env->features[FEAT_1_ECX];
2278 *edx = env->features[FEAT_1_EDX];
2279 if (cs->nr_cores * cs->nr_threads > 1) {
2280 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2281 *edx |= 1 << 28; /* HTT bit */
2283 break;
2284 case 2:
2285 /* cache info: needed for Pentium Pro compatibility */
2286 if (cpu->cache_info_passthrough) {
2287 host_cpuid(index, 0, eax, ebx, ecx, edx);
2288 break;
2290 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2291 *ebx = 0;
2292 *ecx = 0;
2293 *edx = (L1D_DESCRIPTOR << 16) | \
2294 (L1I_DESCRIPTOR << 8) | \
2295 (L2_DESCRIPTOR);
2296 break;
2297 case 4:
2298 /* cache info: needed for Core compatibility */
2299 if (cpu->cache_info_passthrough) {
2300 host_cpuid(index, count, eax, ebx, ecx, edx);
2301 *eax &= ~0xFC000000;
2302 } else {
2303 *eax = 0;
2304 switch (count) {
2305 case 0: /* L1 dcache info */
2306 *eax |= CPUID_4_TYPE_DCACHE | \
2307 CPUID_4_LEVEL(1) | \
2308 CPUID_4_SELF_INIT_LEVEL;
2309 *ebx = (L1D_LINE_SIZE - 1) | \
2310 ((L1D_PARTITIONS - 1) << 12) | \
2311 ((L1D_ASSOCIATIVITY - 1) << 22);
2312 *ecx = L1D_SETS - 1;
2313 *edx = CPUID_4_NO_INVD_SHARING;
2314 break;
2315 case 1: /* L1 icache info */
2316 *eax |= CPUID_4_TYPE_ICACHE | \
2317 CPUID_4_LEVEL(1) | \
2318 CPUID_4_SELF_INIT_LEVEL;
2319 *ebx = (L1I_LINE_SIZE - 1) | \
2320 ((L1I_PARTITIONS - 1) << 12) | \
2321 ((L1I_ASSOCIATIVITY - 1) << 22);
2322 *ecx = L1I_SETS - 1;
2323 *edx = CPUID_4_NO_INVD_SHARING;
2324 break;
2325 case 2: /* L2 cache info */
2326 *eax |= CPUID_4_TYPE_UNIFIED | \
2327 CPUID_4_LEVEL(2) | \
2328 CPUID_4_SELF_INIT_LEVEL;
2329 if (cs->nr_threads > 1) {
2330 *eax |= (cs->nr_threads - 1) << 14;
2332 *ebx = (L2_LINE_SIZE - 1) | \
2333 ((L2_PARTITIONS - 1) << 12) | \
2334 ((L2_ASSOCIATIVITY - 1) << 22);
2335 *ecx = L2_SETS - 1;
2336 *edx = CPUID_4_NO_INVD_SHARING;
2337 break;
2338 default: /* end of info */
2339 *eax = 0;
2340 *ebx = 0;
2341 *ecx = 0;
2342 *edx = 0;
2343 break;
2347 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2348 if ((*eax & 31) && cs->nr_cores > 1) {
2349 *eax |= (cs->nr_cores - 1) << 26;
2351 break;
2352 case 5:
2353 /* mwait info: needed for Core compatibility */
2354 *eax = 0; /* Smallest monitor-line size in bytes */
2355 *ebx = 0; /* Largest monitor-line size in bytes */
2356 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2357 *edx = 0;
2358 break;
2359 case 6:
2360 /* Thermal and Power Leaf */
2361 *eax = 0;
2362 *ebx = 0;
2363 *ecx = 0;
2364 *edx = 0;
2365 break;
2366 case 7:
2367 /* Structured Extended Feature Flags Enumeration Leaf */
2368 if (count == 0) {
2369 *eax = 0; /* Maximum ECX value for sub-leaves */
2370 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2371 *ecx = 0; /* Reserved */
2372 *edx = 0; /* Reserved */
2373 } else {
2374 *eax = 0;
2375 *ebx = 0;
2376 *ecx = 0;
2377 *edx = 0;
2379 break;
2380 case 9:
2381 /* Direct Cache Access Information Leaf */
2382 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2383 *ebx = 0;
2384 *ecx = 0;
2385 *edx = 0;
2386 break;
2387 case 0xA:
2388 /* Architectural Performance Monitoring Leaf */
2389 if (kvm_enabled() && cpu->enable_pmu) {
2390 KVMState *s = cs->kvm_state;
2392 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2393 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2394 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2395 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2396 } else {
2397 *eax = 0;
2398 *ebx = 0;
2399 *ecx = 0;
2400 *edx = 0;
2402 break;
2403 case 0xD: {
2404 KVMState *s = cs->kvm_state;
2405 uint64_t kvm_mask;
2406 int i;
2408 /* Processor Extended State */
2409 *eax = 0;
2410 *ebx = 0;
2411 *ecx = 0;
2412 *edx = 0;
2413 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2414 break;
2416 kvm_mask =
2417 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2418 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2420 if (count == 0) {
2421 *ecx = 0x240;
2422 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2423 const ExtSaveArea *esa = &ext_save_areas[i];
2424 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2425 (kvm_mask & (1ULL << i)) != 0) {
2426 if (i < 32) {
2427 *eax |= 1 << i;
2428 } else {
2429 *edx |= 1 << (i - 32);
2431 *ecx = MAX(*ecx, esa->offset + esa->size);
2434 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2435 *ebx = *ecx;
2436 } else if (count == 1) {
2437 *eax = env->features[FEAT_XSAVE];
2438 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2439 const ExtSaveArea *esa = &ext_save_areas[count];
2440 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2441 (kvm_mask & (1ULL << count)) != 0) {
2442 *eax = esa->size;
2443 *ebx = esa->offset;
2446 break;
2448 case 0x80000000:
2449 *eax = env->cpuid_xlevel;
2450 *ebx = env->cpuid_vendor1;
2451 *edx = env->cpuid_vendor2;
2452 *ecx = env->cpuid_vendor3;
2453 break;
2454 case 0x80000001:
2455 *eax = env->cpuid_version;
2456 *ebx = 0;
2457 *ecx = env->features[FEAT_8000_0001_ECX];
2458 *edx = env->features[FEAT_8000_0001_EDX];
2460 /* The Linux kernel checks for the CMPLegacy bit and
2461 * discards multiple thread information if it is set.
2462 * So don't set it here for Intel to make Linux guests happy.
2464 if (cs->nr_cores * cs->nr_threads > 1) {
2465 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2466 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2467 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2468 *ecx |= 1 << 1; /* CmpLegacy bit */
2471 break;
2472 case 0x80000002:
2473 case 0x80000003:
2474 case 0x80000004:
2475 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2476 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2477 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2478 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2479 break;
2480 case 0x80000005:
2481 /* cache info (L1 cache) */
2482 if (cpu->cache_info_passthrough) {
2483 host_cpuid(index, 0, eax, ebx, ecx, edx);
2484 break;
2486 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2487 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2488 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2489 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2490 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2491 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2492 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2493 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2494 break;
2495 case 0x80000006:
2496 /* cache info (L2 cache) */
2497 if (cpu->cache_info_passthrough) {
2498 host_cpuid(index, 0, eax, ebx, ecx, edx);
2499 break;
2501 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2502 (L2_DTLB_2M_ENTRIES << 16) | \
2503 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2504 (L2_ITLB_2M_ENTRIES);
2505 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2506 (L2_DTLB_4K_ENTRIES << 16) | \
2507 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2508 (L2_ITLB_4K_ENTRIES);
2509 *ecx = (L2_SIZE_KB_AMD << 16) | \
2510 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2511 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2512 *edx = ((L3_SIZE_KB/512) << 18) | \
2513 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2514 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2515 break;
2516 case 0x80000007:
2517 *eax = 0;
2518 *ebx = 0;
2519 *ecx = 0;
2520 *edx = env->features[FEAT_8000_0007_EDX];
2521 break;
2522 case 0x80000008:
2523 /* virtual & phys address size in low 2 bytes. */
2524 /* XXX: This value must match the one used in the MMU code. */
2525 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2526 /* 64 bit processor */
2527 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2528 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2529 } else {
2530 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2531 *eax = 0x00000024; /* 36 bits physical */
2532 } else {
2533 *eax = 0x00000020; /* 32 bits physical */
2536 *ebx = 0;
2537 *ecx = 0;
2538 *edx = 0;
2539 if (cs->nr_cores * cs->nr_threads > 1) {
2540 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2542 break;
2543 case 0x8000000A:
2544 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2545 *eax = 0x00000001; /* SVM Revision */
2546 *ebx = 0x00000010; /* nr of ASIDs */
2547 *ecx = 0;
2548 *edx = env->features[FEAT_SVM]; /* optional features */
2549 } else {
2550 *eax = 0;
2551 *ebx = 0;
2552 *ecx = 0;
2553 *edx = 0;
2555 break;
2556 case 0xC0000000:
2557 *eax = env->cpuid_xlevel2;
2558 *ebx = 0;
2559 *ecx = 0;
2560 *edx = 0;
2561 break;
2562 case 0xC0000001:
2563 /* Support for VIA CPU's CPUID instruction */
2564 *eax = env->cpuid_version;
2565 *ebx = 0;
2566 *ecx = 0;
2567 *edx = env->features[FEAT_C000_0001_EDX];
2568 break;
2569 case 0xC0000002:
2570 case 0xC0000003:
2571 case 0xC0000004:
2572 /* Reserved for future use; currently filled with zeroes */
2573 *eax = 0;
2574 *ebx = 0;
2575 *ecx = 0;
2576 *edx = 0;
2577 break;
2578 default:
2579 /* reserved values: zero */
2580 *eax = 0;
2581 *ebx = 0;
2582 *ecx = 0;
2583 *edx = 0;
2584 break;
2588 /* CPUClass::reset() */
2589 static void x86_cpu_reset(CPUState *s)
2591 X86CPU *cpu = X86_CPU(s);
2592 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2593 CPUX86State *env = &cpu->env;
2594 int i;
2596 xcc->parent_reset(s);
2598 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2600 tlb_flush(s, 1);
2602 env->old_exception = -1;
2604 /* init to reset state */
2606 #ifdef CONFIG_SOFTMMU
2607 env->hflags |= HF_SOFTMMU_MASK;
2608 #endif
2609 env->hflags2 |= HF2_GIF_MASK;
2611 cpu_x86_update_cr0(env, 0x60000010);
2612 env->a20_mask = ~0x0;
2613 env->smbase = 0x30000;
2615 env->idt.limit = 0xffff;
2616 env->gdt.limit = 0xffff;
2617 env->ldt.limit = 0xffff;
2618 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2619 env->tr.limit = 0xffff;
2620 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2622 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2623 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2624 DESC_R_MASK | DESC_A_MASK);
2625 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2626 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2627 DESC_A_MASK);
2628 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2629 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2630 DESC_A_MASK);
2631 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2632 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2633 DESC_A_MASK);
2634 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2635 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2636 DESC_A_MASK);
2637 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2638 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2639 DESC_A_MASK);
2641 env->eip = 0xfff0;
2642 env->regs[R_EDX] = env->cpuid_version;
2644 env->eflags = 0x2;
2646 /* FPU init */
2647 for (i = 0; i < 8; i++) {
2648 env->fptags[i] = 1;
2650 cpu_set_fpuc(env, 0x37f);
2652 env->mxcsr = 0x1f80;
2653 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2655 env->pat = 0x0007040600070406ULL;
2656 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2658 memset(env->dr, 0, sizeof(env->dr));
2659 env->dr[6] = DR6_FIXED_1;
2660 env->dr[7] = DR7_FIXED_1;
2661 cpu_breakpoint_remove_all(s, BP_CPU);
2662 cpu_watchpoint_remove_all(s, BP_CPU);
2664 env->xcr0 = 1;
2667 * SDM 11.11.5 requires:
2668 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2669 * - IA32_MTRR_PHYSMASKn.V = 0
2670 * All other bits are undefined. For simplification, zero it all.
2672 env->mtrr_deftype = 0;
2673 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2674 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2676 #if !defined(CONFIG_USER_ONLY)
2677 /* We hard-wire the BSP to the first CPU. */
2678 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2680 s->halted = !cpu_is_bsp(cpu);
2682 if (kvm_enabled()) {
2683 kvm_arch_reset_vcpu(cpu);
2685 #endif
2688 #ifndef CONFIG_USER_ONLY
2689 bool cpu_is_bsp(X86CPU *cpu)
2691 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2694 /* TODO: remove me when reset over the QOM tree is implemented */
2695 static void x86_cpu_machine_reset_cb(void *opaque)
2697 X86CPU *cpu = opaque;
2698 cpu_reset(CPU(cpu));
2700 #endif
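/* Enable the machine-check architecture when the CPU model reports
 * family >= 6 and both CPUID_MCE and CPUID_MCA: set mcg_cap/mcg_ctl and
 * initialize each default bank's control register to all ones.
 */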
2702 static void mce_init(X86CPU *cpu)
2704 CPUX86State *cenv = &cpu->env;
2705 unsigned int bank;
2707 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2708 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2709 (CPUID_MCE | CPUID_MCA)) {
2710 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2711 cenv->mcg_ctl = ~(uint64_t)0;
2712 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2713 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2718 #ifndef CONFIG_USER_ONLY
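/* Create the local APIC device for this CPU, preferring the in-kernel KVM
 * or Xen implementation when available; the device is realized later by
 * x86_cpu_apic_realize().
 */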
2719 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2721 DeviceState *dev = DEVICE(cpu);
2722 APICCommonState *apic;
2723 const char *apic_type = "apic";
2725 if (kvm_irqchip_in_kernel()) {
2726 apic_type = "kvm-apic";
2727 } else if (xen_enabled()) {
2728 apic_type = "xen-apic";
2731 cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
2732 if (cpu->apic_state == NULL) {
2733 error_setg(errp, "APIC device '%s' could not be created", apic_type);
2734 return;
2737 object_property_add_child(OBJECT(cpu), "apic",
2738 OBJECT(cpu->apic_state), NULL);
2739 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2740 /* TODO: convert to link<> */
2741 apic = APIC_COMMON(cpu->apic_state);
2742 apic->cpu = cpu;
2745 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2747 if (cpu->apic_state == NULL) {
2748 return;
2750 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2751 errp);
2754 static void x86_cpu_machine_done(Notifier *n, void *unused)
2756 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2757 MemoryRegion *smram =
2758 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2760 if (smram) {
2761 cpu->smram = g_new(MemoryRegion, 1);
2762 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2763 smram, 0, 1ull << 32);
2764 memory_region_set_enabled(cpu->smram, false);
2765 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2768 #else
2769 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2772 #endif
2775 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2776 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2777 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2778 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2779 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2780 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
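/* DeviceClass::realize: filter the requested features against what the
 * accelerator supports, create the APIC, initialize MCE, set up the per-CPU
 * address space under TCG, then create and reset the vCPU.
 */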
2781 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2783 CPUState *cs = CPU(dev);
2784 X86CPU *cpu = X86_CPU(dev);
2785 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2786 CPUX86State *env = &cpu->env;
2787 Error *local_err = NULL;
2788 static bool ht_warned;
2790 if (cpu->apic_id < 0) {
2791 error_setg(errp, "apic-id property was not initialized properly");
2792 return;
2795 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2796 env->cpuid_level = 7;
2799 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2800 * CPUID[1].EDX.
2802 if (IS_AMD_CPU(env)) {
2803 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2804 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2805 & CPUID_EXT2_AMD_ALIASES);
2809 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2810 error_setg(&local_err,
2811 kvm_enabled() ?
2812 "Host doesn't support requested features" :
2813 "TCG doesn't support requested features");
2814 goto out;
2817 #ifndef CONFIG_USER_ONLY
2818 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2820 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2821 x86_cpu_apic_create(cpu, &local_err);
2822 if (local_err != NULL) {
2823 goto out;
2826 #endif
2828 mce_init(cpu);
2830 #ifndef CONFIG_USER_ONLY
2831 if (tcg_enabled()) {
2832 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2833 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2834 cs->as = g_new(AddressSpace, 1);
2836 /* Outer container... */
2837 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2838 memory_region_set_enabled(cpu->cpu_as_root, true);
2840 /* ... with two regions inside: normal system memory with low
2841 * priority, and...
2843 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2844 get_system_memory(), 0, ~0ull);
2845 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2846 memory_region_set_enabled(cpu->cpu_as_mem, true);
2847 address_space_init(cs->as, cpu->cpu_as_root, "CPU");
2849 /* ... SMRAM with higher priority, linked from /machine/smram. */
2850 cpu->machine_done.notify = x86_cpu_machine_done;
2851 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2853 #endif
2855 qemu_init_vcpu(cs);
2857 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2858 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2859 * based on inputs (sockets, cores, threads), it is still better to give
2860 * users a warning.
2862 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2863 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
2865 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2866 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2867 " -smp options properly.");
2868 ht_warned = true;
2871 x86_cpu_apic_realize(cpu, &local_err);
2872 if (local_err != NULL) {
2873 goto out;
2875 cpu_reset(cs);
2877 xcc->parent_realize(dev, &local_err);
2879 out:
2880 if (local_err != NULL) {
2881 error_propagate(errp, local_err);
2882 return;
2886 typedef struct BitProperty {
2887 uint32_t *ptr;
2888 uint32_t mask;
2889 } BitProperty;
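/* Getter/setter pair backing the per-feature-bit boolean properties
 * registered by x86_cpu_register_bit_prop() below: the getter reports true
 * only when all bits in the mask are set, the setter sets or clears them all.
 */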
2891 static void x86_cpu_get_bit_prop(Object *obj,
2892 struct Visitor *v,
2893 void *opaque,
2894 const char *name,
2895 Error **errp)
2897 BitProperty *fp = opaque;
2898 bool value = (*fp->ptr & fp->mask) == fp->mask;
2899 visit_type_bool(v, &value, name, errp);
2902 static void x86_cpu_set_bit_prop(Object *obj,
2903 struct Visitor *v,
2904 void *opaque,
2905 const char *name,
2906 Error **errp)
2908 DeviceState *dev = DEVICE(obj);
2909 BitProperty *fp = opaque;
2910 Error *local_err = NULL;
2911 bool value;
2913 if (dev->realized) {
2914 qdev_prop_set_after_realize(dev, name, errp);
2915 return;
2918 visit_type_bool(v, &value, name, &local_err);
2919 if (local_err) {
2920 error_propagate(errp, local_err);
2921 return;
2924 if (value) {
2925 *fp->ptr |= fp->mask;
2926 } else {
2927 *fp->ptr &= ~fp->mask;
2931 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
2932 void *opaque)
2934 BitProperty *prop = opaque;
2935 g_free(prop);
2938 /* Register a boolean property to get/set a single bit in a uint32_t field.
2940 * The same property name can be registered multiple times to make it affect
2941 * multiple bits in the same FeatureWord. In that case, the getter will return
2942 * true only if all bits are set.
2944 static void x86_cpu_register_bit_prop(X86CPU *cpu,
2945 const char *prop_name,
2946 uint32_t *field,
2947 int bitnr)
2949 BitProperty *fp;
2950 ObjectProperty *op;
2951 uint32_t mask = (1UL << bitnr);
2953 op = object_property_find(OBJECT(cpu), prop_name, NULL);
2954 if (op) {
2955 fp = op->opaque;
2956 assert(fp->ptr == field);
2957 fp->mask |= mask;
2958 } else {
2959 fp = g_new0(BitProperty, 1);
2960 fp->ptr = field;
2961 fp->mask = mask;
2962 object_property_add(OBJECT(cpu), prop_name, "bool",
2963 x86_cpu_get_bit_prop,
2964 x86_cpu_set_bit_prop,
2965 x86_cpu_release_bit_prop, fp, &error_abort);
2969 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
2970 FeatureWord w,
2971 int bitnr)
2973 Object *obj = OBJECT(cpu);
2974 int i;
2975 char **names;
2976 FeatureWordInfo *fi = &feature_word_info[w];
2978 if (!fi->feat_names) {
2979 return;
2981 if (!fi->feat_names[bitnr]) {
2982 return;
2985 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
2987 feat2prop(names[0]);
2988 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
2990 for (i = 1; names[i]; i++) {
2991 feat2prop(names[i]);
2992 object_property_add_alias(obj, names[i], obj, g_strdup(names[0]),
2993 &error_abort);
2996 g_strfreev(names);
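/* TypeInfo::instance_init: register the QOM properties of an X86CPU
 * (family/model/stepping, vendor, model-id, tsc-frequency, apic-id, the
 * feature-word arrays and the per-feature-bit booleans) and load the
 * class's CPU model definition.
 */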
2999 static void x86_cpu_initfn(Object *obj)
3001 CPUState *cs = CPU(obj);
3002 X86CPU *cpu = X86_CPU(obj);
3003 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3004 CPUX86State *env = &cpu->env;
3005 FeatureWord w;
3006 static int inited;
3008 cs->env_ptr = env;
3009 cpu_exec_init(env);
3011 object_property_add(obj, "family", "int",
3012 x86_cpuid_version_get_family,
3013 x86_cpuid_version_set_family, NULL, NULL, NULL);
3014 object_property_add(obj, "model", "int",
3015 x86_cpuid_version_get_model,
3016 x86_cpuid_version_set_model, NULL, NULL, NULL);
3017 object_property_add(obj, "stepping", "int",
3018 x86_cpuid_version_get_stepping,
3019 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3020 object_property_add_str(obj, "vendor",
3021 x86_cpuid_get_vendor,
3022 x86_cpuid_set_vendor, NULL);
3023 object_property_add_str(obj, "model-id",
3024 x86_cpuid_get_model_id,
3025 x86_cpuid_set_model_id, NULL);
3026 object_property_add(obj, "tsc-frequency", "int",
3027 x86_cpuid_get_tsc_freq,
3028 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3029 object_property_add(obj, "apic-id", "int",
3030 x86_cpuid_get_apic_id,
3031 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3032 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3033 x86_cpu_get_feature_words,
3034 NULL, NULL, (void *)env->features, NULL);
3035 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3036 x86_cpu_get_feature_words,
3037 NULL, NULL, (void *)cpu->filtered_features, NULL);
3039 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3041 #ifndef CONFIG_USER_ONLY
3042 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3043 cpu->apic_id = -1;
3044 #endif
3046 for (w = 0; w < FEATURE_WORDS; w++) {
3047 int bitnr;
3049 for (bitnr = 0; bitnr < 32; bitnr++) {
3050 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3054 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3056 /* init various static tables used in TCG mode */
3057 if (tcg_enabled() && !inited) {
3058 inited = 1;
3059 optimize_flags_init();
3063 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3065 X86CPU *cpu = X86_CPU(cs);
3067 return cpu->apic_id;
3070 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3072 X86CPU *cpu = X86_CPU(cs);
3074 return cpu->env.cr[0] & CR0_PG_MASK;
3077 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3079 X86CPU *cpu = X86_CPU(cs);
3081 cpu->env.eip = value;
3084 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3086 X86CPU *cpu = X86_CPU(cs);
3088 cpu->env.eip = tb->pc - tb->cs_base;
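/* Return true if the CPU has an interrupt it can service right now: hard
 * interrupts only with EFLAGS.IF set, SMIs only outside SMM, and
 * NMI/INIT/SIPI/MCE unconditionally.
 */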
3091 static bool x86_cpu_has_work(CPUState *cs)
3093 X86CPU *cpu = X86_CPU(cs);
3094 CPUX86State *env = &cpu->env;
3096 #if !defined(CONFIG_USER_ONLY)
3097 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
3098 apic_poll_irq(cpu->apic_state);
3099 cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
3101 #endif
3103 return ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
3104 (env->eflags & IF_MASK)) ||
3105 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3106 CPU_INTERRUPT_INIT |
3107 CPU_INTERRUPT_SIPI |
3108 CPU_INTERRUPT_MCE)) ||
3109 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3110 !(env->hflags & HF_SMM_MASK));
3113 static Property x86_cpu_properties[] = {
3114 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3115 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3116 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3117 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3118 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3119 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
3120 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3121 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3122 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3123 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3124 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3125 DEFINE_PROP_END_OF_LIST()
3128 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3130 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3131 CPUClass *cc = CPU_CLASS(oc);
3132 DeviceClass *dc = DEVICE_CLASS(oc);
3134 xcc->parent_realize = dc->realize;
3135 dc->realize = x86_cpu_realizefn;
3136 dc->bus_type = TYPE_ICC_BUS;
3137 dc->props = x86_cpu_properties;
3139 xcc->parent_reset = cc->reset;
3140 cc->reset = x86_cpu_reset;
3141 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3143 cc->class_by_name = x86_cpu_class_by_name;
3144 cc->parse_features = x86_cpu_parse_featurestr;
3145 cc->has_work = x86_cpu_has_work;
3146 cc->do_interrupt = x86_cpu_do_interrupt;
3147 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3148 cc->dump_state = x86_cpu_dump_state;
3149 cc->set_pc = x86_cpu_set_pc;
3150 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3151 cc->gdb_read_register = x86_cpu_gdb_read_register;
3152 cc->gdb_write_register = x86_cpu_gdb_write_register;
3153 cc->get_arch_id = x86_cpu_get_arch_id;
3154 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3155 #ifdef CONFIG_USER_ONLY
3156 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3157 #else
3158 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3159 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3160 cc->write_elf64_note = x86_cpu_write_elf64_note;
3161 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3162 cc->write_elf32_note = x86_cpu_write_elf32_note;
3163 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3164 cc->vmsd = &vmstate_x86_cpu;
3165 #endif
3166 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3167 #ifndef CONFIG_USER_ONLY
3168 cc->debug_excp_handler = breakpoint_handler;
3169 #endif
3170 cc->cpu_exec_enter = x86_cpu_exec_enter;
3171 cc->cpu_exec_exit = x86_cpu_exec_exit;
3174 static const TypeInfo x86_cpu_type_info = {
3175 .name = TYPE_X86_CPU,
3176 .parent = TYPE_CPU,
3177 .instance_size = sizeof(X86CPU),
3178 .instance_init = x86_cpu_initfn,
3179 .abstract = true,
3180 .class_size = sizeof(X86CPUClass),
3181 .class_init = x86_cpu_common_class_init,
3184 static void x86_cpu_register_types(void)
3186 int i;
3188 type_register_static(&x86_cpu_type_info);
3189 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3190 x86_register_cpudef_type(&builtin_x86_defs[i]);
3192 #ifdef CONFIG_KVM
3193 type_register_static(&host_x86_cpu_type_info);
3194 #endif
3197 type_init(x86_cpu_register_types)