target-i386/cpu.c
/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #include "hw/hw.h"
39 #if defined(CONFIG_KVM)
40 #include <linux/kvm_para.h>
41 #endif
43 #include "sysemu/sysemu.h"
44 #include "hw/qdev-properties.h"
45 #include "hw/cpu/icc_bus.h"
46 #ifndef CONFIG_USER_ONLY
47 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
53 /* Cache topology CPUID constants: */
55 /* CPUID Leaf 2 Descriptors */
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 /* CPUID Leaf 4 constants: */
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
69 #define CPUID_4_LEVEL(l) ((l) << 5)
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
79 #define ASSOC_FULL 0xFF
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
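/* For illustration: AMD_ENC_ASSOC(16) evaluates to 0x8 and
 * AMD_ENC_ASSOC(ASSOC_FULL) to 0xF, the encodings CPUID leaf 0x80000006
 * documents for 16-way and fully associative caches; values not listed
 * fall through to 0 (treated as invalid). */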
96 /* Definitions of the hardcoded cache entries we expose: */
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
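/* Quick sanity check of the numbers above: 64 B line * 8 ways * 64 sets
 * * 1 partition = 32 KiB, matching the 0x2c descriptor; the AMD leaf
 * 0x80000005 values describe a different cache (64 KiB, 2-way), which is
 * what the FIXME above refers to. */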
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
140 /* TLB definitions: */
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;
    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}
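/* Example: for CPUID.0 the vendor string "GenuineIntel" arrives as
 * EBX="Genu", EDX="ineI", ECX="ntel"; callers below therefore pass the
 * registers in (ebx, edx, ecx) order to reassemble the 12-byte string. */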
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
200 /* Feature names that are already defined on feature_name[] but are set on
201 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
202 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203 * if and only if CPU vendor is AMD.
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
263 NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
266 static const char *cpuid_apm_edx_feature_name[] = {
267 NULL, NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 "invtsc", NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
277 static const char *cpuid_xsave_feature_name[] = {
278 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
279 NULL, NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
288 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
289 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
290 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
291 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
292 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
293 CPUID_PSE36 | CPUID_FXSR)
294 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
295 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
296 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
297 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
298 CPUID_PAE | CPUID_SEP | CPUID_APIC)
300 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
301 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
302 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
303 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
304 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
305 /* partly implemented:
306 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
307 /* missing:
308 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
309 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
310 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
311 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
312 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
313 /* missing:
314 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
315 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
316 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
317 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
318 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
319 CPUID_EXT_RDRAND */
321 #ifdef TARGET_X86_64
322 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
323 #else
324 #define TCG_EXT2_X86_64_FEATURES 0
325 #endif
327 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
328 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
329 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
330 TCG_EXT2_X86_64_FEATURES)
331 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
332 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
333 #define TCG_EXT4_FEATURES 0
334 #define TCG_SVM_FEATURES 0
335 #define TCG_KVM_FEATURES 0
336 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
337 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
338 /* missing:
339 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
340 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
341 CPUID_7_0_EBX_RDSEED */
342 #define TCG_APM_FEATURES 0
345 typedef struct FeatureWordInfo {
346 const char **feat_names;
347 uint32_t cpuid_eax; /* Input EAX for CPUID */
348 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
349 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
350 int cpuid_reg; /* output register (R_* constant) */
351 uint32_t tcg_features; /* Feature flags supported by TCG */
352 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
353 } FeatureWordInfo;
355 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
356 [FEAT_1_EDX] = {
357 .feat_names = feature_name,
358 .cpuid_eax = 1, .cpuid_reg = R_EDX,
359 .tcg_features = TCG_FEATURES,
361 [FEAT_1_ECX] = {
362 .feat_names = ext_feature_name,
363 .cpuid_eax = 1, .cpuid_reg = R_ECX,
364 .tcg_features = TCG_EXT_FEATURES,
366 [FEAT_8000_0001_EDX] = {
367 .feat_names = ext2_feature_name,
368 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
369 .tcg_features = TCG_EXT2_FEATURES,
371 [FEAT_8000_0001_ECX] = {
372 .feat_names = ext3_feature_name,
373 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
374 .tcg_features = TCG_EXT3_FEATURES,
376 [FEAT_C000_0001_EDX] = {
377 .feat_names = ext4_feature_name,
378 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
379 .tcg_features = TCG_EXT4_FEATURES,
381 [FEAT_KVM] = {
382 .feat_names = kvm_feature_name,
383 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
384 .tcg_features = TCG_KVM_FEATURES,
386 [FEAT_SVM] = {
387 .feat_names = svm_feature_name,
388 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
389 .tcg_features = TCG_SVM_FEATURES,
391 [FEAT_7_0_EBX] = {
392 .feat_names = cpuid_7_0_ebx_feature_name,
393 .cpuid_eax = 7,
394 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
395 .cpuid_reg = R_EBX,
396 .tcg_features = TCG_7_0_EBX_FEATURES,
398 [FEAT_8000_0007_EDX] = {
399 .feat_names = cpuid_apm_edx_feature_name,
400 .cpuid_eax = 0x80000007,
401 .cpuid_reg = R_EDX,
402 .tcg_features = TCG_APM_FEATURES,
403 .unmigratable_flags = CPUID_APM_INVTSC,
405 [FEAT_XSAVE] = {
406 .feat_names = cpuid_xsave_feature_name,
407 .cpuid_eax = 0xd,
408 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
409 .cpuid_reg = R_EAX,
410 .tcg_features = 0,
414 typedef struct X86RegisterInfo32 {
415 /* Name of register */
416 const char *name;
417 /* QAPI enum value register */
418 X86CPURegister32 qapi_enum;
419 } X86RegisterInfo32;
421 #define REGISTER(reg) \
422 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
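/* REGISTER(EAX), for example, expands to
 *   [R_EAX] = { .name = "EAX", .qapi_enum = X86_CPU_REGISTER32_EAX }
 * so the table below can be indexed directly by the R_* constants. */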
423 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
424 REGISTER(EAX),
425 REGISTER(ECX),
426 REGISTER(EDX),
427 REGISTER(EBX),
428 REGISTER(ESP),
429 REGISTER(EBP),
430 REGISTER(ESI),
431 REGISTER(EDI),
433 #undef REGISTER
435 typedef struct ExtSaveArea {
436 uint32_t feature, bits;
437 uint32_t offset, size;
438 } ExtSaveArea;
440 static const ExtSaveArea ext_save_areas[] = {
441 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
442 .offset = 0x240, .size = 0x100 },
443 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
444 .offset = 0x3c0, .size = 0x40 },
445 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
446 .offset = 0x400, .size = 0x40 },
447 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
448 .offset = 0x440, .size = 0x40 },
449 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
450 .offset = 0x480, .size = 0x200 },
451 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
452 .offset = 0x680, .size = 0x400 },
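/* Indexes 2..7 above correspond to the standard (non-compacted) XSAVE
 * state components: AVX (YMM_Hi128), MPX BNDREGS/BNDCSR, and the three
 * AVX-512 components (opmask, ZMM_Hi256, Hi16_ZMM); the offsets and sizes
 * match the fixed layout Intel documents for XCR0-managed state. */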
const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}
463 /* KVM-specific features that are automatically added to all CPU models
464 * when KVM is enabled.
466 static uint32_t kvm_default_features[FEATURE_WORDS] = {
467 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
468 (1 << KVM_FEATURE_NOP_IO_DELAY) |
469 (1 << KVM_FEATURE_CLOCKSOURCE2) |
470 (1 << KVM_FEATURE_ASYNC_PF) |
471 (1 << KVM_FEATURE_STEAL_TIME) |
472 (1 << KVM_FEATURE_PV_EOI) |
473 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
474 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
477 /* Features that are not added by default to any CPU model when KVM is enabled.
479 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
480 [FEAT_1_EDX] = CPUID_ACPI,
481 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
482 [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
485 void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
487 kvm_default_features[w] &= ~features;
490 void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
492 kvm_default_unset_features[w] &= ~features;
496 * Returns the set of feature flags that are supported and migratable by
497 * QEMU, for a given FeatureWord.
499 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
501 FeatureWordInfo *wi = &feature_word_info[w];
502 uint32_t r = 0;
503 int i;
505 for (i = 0; i < 32; i++) {
506 uint32_t f = 1U << i;
507 /* If the feature name is unknown, it is not supported by QEMU yet */
508 if (!wi->feat_names[i]) {
509 continue;
511 /* Skip features known to QEMU, but explicitly marked as unmigratable */
512 if (wi->unmigratable_flags & f) {
513 continue;
515 r |= f;
517 return r;
520 void host_cpuid(uint32_t function, uint32_t count,
521 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
523 uint32_t vec[4];
525 #ifdef __x86_64__
526 asm volatile("cpuid"
527 : "=a"(vec[0]), "=b"(vec[1]),
528 "=c"(vec[2]), "=d"(vec[3])
529 : "0"(function), "c"(count) : "cc");
530 #elif defined(__i386__)
531 asm volatile("pusha \n\t"
532 "cpuid \n\t"
533 "mov %%eax, 0(%2) \n\t"
534 "mov %%ebx, 4(%2) \n\t"
535 "mov %%ecx, 8(%2) \n\t"
536 "mov %%edx, 12(%2) \n\t"
537 "popa"
538 : : "a"(function), "c"(count), "S"(vec)
539 : "memory", "cc");
540 #else
541 abort();
542 #endif
544 if (eax)
545 *eax = vec[0];
546 if (ebx)
547 *ebx = vec[1];
548 if (ecx)
549 *ecx = vec[2];
550 if (edx)
551 *edx = vec[3];
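/* Typical use (sketch): host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL)
 * retrieves the host's maximum extended CPUID level; output pointers that
 * are NULL are simply skipped by the checks above. */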
554 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
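/* Note: despite its name, iswhite() matches any non-NUL byte outside the
 * printable ASCII range (control characters, space, and bytes above '~'),
 * not just whitespace. */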
/* general substring compare of *[s1..e1) and *[s2..e2).  sx is start of
 * a substring.  ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to be sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
561 static int sstrcmp(const char *s1, const char *e1,
562 const char *s2, const char *e2)
564 for (;;) {
565 if (!*s1 || !*s2 || *s1 != *s2)
566 return (*s1 - *s2);
567 ++s1, ++s2;
568 if (s1 == e1 && s2 == e2)
569 return (0);
570 else if (s1 == e1)
571 return (*s2);
572 else if (s2 == e2)
573 return (*s1);
577 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
578 * '|' delimited (possibly empty) strings in which case search for a match
579 * within the alternatives proceeds left to right. Return 0 for success,
580 * non-zero otherwise.
582 static int altcmp(const char *s, const char *e, const char *altstr)
584 const char *p, *q;
586 for (q = p = altstr; ; ) {
587 while (*p && *p != '|')
588 ++p;
589 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
590 return (0);
591 if (!*p)
592 return (1);
593 else
594 q = ++p;
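/* e.g. altcmp(s, e, "pni|sse3") returns 0 when *[s..e) equals either
 * "pni" or "sse3", which is how the aliased names in the feature name
 * tables above are matched. */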
598 /* search featureset for flag *[s..e), if found set corresponding bit in
599 * *pval and return true, otherwise return false
601 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
602 const char **featureset)
604 uint32_t mask;
605 const char **ppc;
606 bool found = false;
608 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
609 if (*ppc && !altcmp(s, e, *ppc)) {
610 *pval |= mask;
611 found = true;
614 return found;
617 static void add_flagname_to_bitmaps(const char *flagname,
618 FeatureWordArray words,
619 Error **errp)
621 FeatureWord w;
622 for (w = 0; w < FEATURE_WORDS; w++) {
623 FeatureWordInfo *wi = &feature_word_info[w];
624 if (wi->feat_names &&
625 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
626 break;
629 if (w == FEATURE_WORDS) {
630 error_setg(errp, "CPU feature %s not found", flagname);
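/* For example, add_flagname_to_bitmaps("sse4_2", words, &err) sets the
 * CPUID_EXT_SSE42 bit in words[FEAT_1_ECX], because bit 20 of
 * ext_feature_name[] carries the alias "sse4.2|sse4_2". */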
634 /* CPU class name definitions: */
636 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
637 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
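/* On a 64-bit target build this yields names like "qemu64-x86_64-cpu"
 * (TYPE_X86_CPU supplies the suffix); the result must be released with
 * g_free() by the caller. */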
647 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
649 ObjectClass *oc;
650 char *typename;
652 if (cpu_model == NULL) {
653 return NULL;
656 typename = x86_cpu_type_name(cpu_model);
657 oc = object_class_by_name(typename);
658 g_free(typename);
659 return oc;
662 struct X86CPUDefinition {
663 const char *name;
664 uint32_t level;
665 uint32_t xlevel;
666 uint32_t xlevel2;
667 /* vendor is zero-terminated, 12 character ASCII string */
668 char vendor[CPUID_VENDOR_SZ + 1];
669 int family;
670 int model;
671 int stepping;
672 FeatureWordArray features;
673 char model_id[48];
674 bool cache_info_passthrough;
677 static X86CPUDefinition builtin_x86_defs[] = {
679 .name = "qemu64",
680 .level = 4,
681 .vendor = CPUID_VENDOR_AMD,
682 .family = 6,
683 .model = 6,
684 .stepping = 3,
685 .features[FEAT_1_EDX] =
686 PPRO_FEATURES |
687 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
688 CPUID_PSE36,
689 .features[FEAT_1_ECX] =
690 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
691 .features[FEAT_8000_0001_EDX] =
692 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
693 .features[FEAT_8000_0001_ECX] =
694 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
695 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
696 .xlevel = 0x8000000A,
699 .name = "phenom",
700 .level = 5,
701 .vendor = CPUID_VENDOR_AMD,
702 .family = 16,
703 .model = 2,
704 .stepping = 3,
705 /* Missing: CPUID_HT */
706 .features[FEAT_1_EDX] =
707 PPRO_FEATURES |
708 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
709 CPUID_PSE36 | CPUID_VME,
710 .features[FEAT_1_ECX] =
711 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
712 CPUID_EXT_POPCNT,
713 .features[FEAT_8000_0001_EDX] =
714 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
715 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
716 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
717 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
718 CPUID_EXT3_CR8LEG,
719 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
720 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
721 .features[FEAT_8000_0001_ECX] =
722 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
723 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
724 /* Missing: CPUID_SVM_LBRV */
725 .features[FEAT_SVM] =
726 CPUID_SVM_NPT,
727 .xlevel = 0x8000001A,
728 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
731 .name = "core2duo",
732 .level = 10,
733 .vendor = CPUID_VENDOR_INTEL,
734 .family = 6,
735 .model = 15,
736 .stepping = 11,
737 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
738 .features[FEAT_1_EDX] =
739 PPRO_FEATURES |
740 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
741 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
742 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
743 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
744 .features[FEAT_1_ECX] =
745 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
746 CPUID_EXT_CX16,
747 .features[FEAT_8000_0001_EDX] =
748 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
749 .features[FEAT_8000_0001_ECX] =
750 CPUID_EXT3_LAHF_LM,
751 .xlevel = 0x80000008,
752 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
755 .name = "kvm64",
756 .level = 5,
757 .vendor = CPUID_VENDOR_INTEL,
758 .family = 15,
759 .model = 6,
760 .stepping = 1,
761 /* Missing: CPUID_HT */
762 .features[FEAT_1_EDX] =
763 PPRO_FEATURES | CPUID_VME |
764 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
765 CPUID_PSE36,
766 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
767 .features[FEAT_1_ECX] =
768 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
769 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
770 .features[FEAT_8000_0001_EDX] =
771 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
772 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
773 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
774 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
775 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
776 .features[FEAT_8000_0001_ECX] =
778 .xlevel = 0x80000008,
779 .model_id = "Common KVM processor"
782 .name = "qemu32",
783 .level = 4,
784 .vendor = CPUID_VENDOR_INTEL,
785 .family = 6,
786 .model = 6,
787 .stepping = 3,
788 .features[FEAT_1_EDX] =
789 PPRO_FEATURES,
790 .features[FEAT_1_ECX] =
791 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
792 .xlevel = 0x80000004,
795 .name = "kvm32",
796 .level = 5,
797 .vendor = CPUID_VENDOR_INTEL,
798 .family = 15,
799 .model = 6,
800 .stepping = 1,
801 .features[FEAT_1_EDX] =
802 PPRO_FEATURES | CPUID_VME |
803 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
804 .features[FEAT_1_ECX] =
805 CPUID_EXT_SSE3,
806 .features[FEAT_8000_0001_ECX] =
808 .xlevel = 0x80000008,
809 .model_id = "Common 32-bit KVM processor"
812 .name = "coreduo",
813 .level = 10,
814 .vendor = CPUID_VENDOR_INTEL,
815 .family = 6,
816 .model = 14,
817 .stepping = 8,
818 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
819 .features[FEAT_1_EDX] =
820 PPRO_FEATURES | CPUID_VME |
821 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
822 CPUID_SS,
823 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
824 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
825 .features[FEAT_1_ECX] =
826 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
827 .features[FEAT_8000_0001_EDX] =
828 CPUID_EXT2_NX,
829 .xlevel = 0x80000008,
830 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
833 .name = "486",
834 .level = 1,
835 .vendor = CPUID_VENDOR_INTEL,
836 .family = 4,
837 .model = 8,
838 .stepping = 0,
839 .features[FEAT_1_EDX] =
840 I486_FEATURES,
841 .xlevel = 0,
844 .name = "pentium",
845 .level = 1,
846 .vendor = CPUID_VENDOR_INTEL,
847 .family = 5,
848 .model = 4,
849 .stepping = 3,
850 .features[FEAT_1_EDX] =
851 PENTIUM_FEATURES,
852 .xlevel = 0,
855 .name = "pentium2",
856 .level = 2,
857 .vendor = CPUID_VENDOR_INTEL,
858 .family = 6,
859 .model = 5,
860 .stepping = 2,
861 .features[FEAT_1_EDX] =
862 PENTIUM2_FEATURES,
863 .xlevel = 0,
866 .name = "pentium3",
867 .level = 2,
868 .vendor = CPUID_VENDOR_INTEL,
869 .family = 6,
870 .model = 7,
871 .stepping = 3,
872 .features[FEAT_1_EDX] =
873 PENTIUM3_FEATURES,
874 .xlevel = 0,
877 .name = "athlon",
878 .level = 2,
879 .vendor = CPUID_VENDOR_AMD,
880 .family = 6,
881 .model = 2,
882 .stepping = 3,
883 .features[FEAT_1_EDX] =
884 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
885 CPUID_MCA,
886 .features[FEAT_8000_0001_EDX] =
887 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
888 .xlevel = 0x80000008,
891 .name = "n270",
892 /* original is on level 10 */
893 .level = 5,
894 .vendor = CPUID_VENDOR_INTEL,
895 .family = 6,
896 .model = 28,
897 .stepping = 2,
898 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
899 .features[FEAT_1_EDX] =
900 PPRO_FEATURES |
901 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
902 CPUID_ACPI | CPUID_SS,
903 /* Some CPUs got no CPUID_SEP */
904 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
905 * CPUID_EXT_XTPR */
906 .features[FEAT_1_ECX] =
907 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
908 CPUID_EXT_MOVBE,
909 .features[FEAT_8000_0001_EDX] =
910 CPUID_EXT2_NX,
911 .features[FEAT_8000_0001_ECX] =
912 CPUID_EXT3_LAHF_LM,
913 .xlevel = 0x8000000A,
914 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
917 .name = "Conroe",
918 .level = 4,
919 .vendor = CPUID_VENDOR_INTEL,
920 .family = 6,
921 .model = 15,
922 .stepping = 3,
923 .features[FEAT_1_EDX] =
924 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
925 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
926 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
927 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
928 CPUID_DE | CPUID_FP87,
929 .features[FEAT_1_ECX] =
930 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
931 .features[FEAT_8000_0001_EDX] =
932 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
933 .features[FEAT_8000_0001_ECX] =
934 CPUID_EXT3_LAHF_LM,
935 .xlevel = 0x8000000A,
936 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
939 .name = "Penryn",
940 .level = 4,
941 .vendor = CPUID_VENDOR_INTEL,
942 .family = 6,
943 .model = 23,
944 .stepping = 3,
945 .features[FEAT_1_EDX] =
946 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
947 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
948 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
949 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
950 CPUID_DE | CPUID_FP87,
951 .features[FEAT_1_ECX] =
952 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
953 CPUID_EXT_SSE3,
954 .features[FEAT_8000_0001_EDX] =
955 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
956 .features[FEAT_8000_0001_ECX] =
957 CPUID_EXT3_LAHF_LM,
958 .xlevel = 0x8000000A,
959 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
962 .name = "Nehalem",
963 .level = 4,
964 .vendor = CPUID_VENDOR_INTEL,
965 .family = 6,
966 .model = 26,
967 .stepping = 3,
968 .features[FEAT_1_EDX] =
969 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
970 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
971 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
972 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
973 CPUID_DE | CPUID_FP87,
974 .features[FEAT_1_ECX] =
975 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
976 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
977 .features[FEAT_8000_0001_EDX] =
978 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
979 .features[FEAT_8000_0001_ECX] =
980 CPUID_EXT3_LAHF_LM,
981 .xlevel = 0x8000000A,
982 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
985 .name = "Westmere",
986 .level = 11,
987 .vendor = CPUID_VENDOR_INTEL,
988 .family = 6,
989 .model = 44,
990 .stepping = 1,
991 .features[FEAT_1_EDX] =
992 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
993 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
994 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
995 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
996 CPUID_DE | CPUID_FP87,
997 .features[FEAT_1_ECX] =
998 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
999 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1000 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1001 .features[FEAT_8000_0001_EDX] =
1002 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1003 .features[FEAT_8000_0001_ECX] =
1004 CPUID_EXT3_LAHF_LM,
1005 .xlevel = 0x8000000A,
1006 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1009 .name = "SandyBridge",
1010 .level = 0xd,
1011 .vendor = CPUID_VENDOR_INTEL,
1012 .family = 6,
1013 .model = 42,
1014 .stepping = 1,
1015 .features[FEAT_1_EDX] =
1016 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1017 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1018 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1019 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1020 CPUID_DE | CPUID_FP87,
1021 .features[FEAT_1_ECX] =
1022 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1023 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1024 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1025 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1026 CPUID_EXT_SSE3,
1027 .features[FEAT_8000_0001_EDX] =
1028 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1029 CPUID_EXT2_SYSCALL,
1030 .features[FEAT_8000_0001_ECX] =
1031 CPUID_EXT3_LAHF_LM,
1032 .features[FEAT_XSAVE] =
1033 CPUID_XSAVE_XSAVEOPT,
1034 .xlevel = 0x8000000A,
1035 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1038 .name = "IvyBridge",
1039 .level = 0xd,
1040 .vendor = CPUID_VENDOR_INTEL,
1041 .family = 6,
1042 .model = 58,
1043 .stepping = 9,
1044 .features[FEAT_1_EDX] =
1045 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1046 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1047 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1048 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1049 CPUID_DE | CPUID_FP87,
1050 .features[FEAT_1_ECX] =
1051 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1052 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1053 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1054 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1055 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1056 .features[FEAT_7_0_EBX] =
1057 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1058 CPUID_7_0_EBX_ERMS,
1059 .features[FEAT_8000_0001_EDX] =
1060 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1061 CPUID_EXT2_SYSCALL,
1062 .features[FEAT_8000_0001_ECX] =
1063 CPUID_EXT3_LAHF_LM,
1064 .features[FEAT_XSAVE] =
1065 CPUID_XSAVE_XSAVEOPT,
1066 .xlevel = 0x8000000A,
1067 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1070 .name = "Haswell-noTSX",
1071 .level = 0xd,
1072 .vendor = CPUID_VENDOR_INTEL,
1073 .family = 6,
1074 .model = 60,
1075 .stepping = 1,
1076 .features[FEAT_1_EDX] =
1077 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1078 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1079 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1080 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1081 CPUID_DE | CPUID_FP87,
1082 .features[FEAT_1_ECX] =
1083 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1084 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1085 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1086 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1087 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1088 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1089 .features[FEAT_8000_0001_EDX] =
1090 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1091 CPUID_EXT2_SYSCALL,
1092 .features[FEAT_8000_0001_ECX] =
1093 CPUID_EXT3_LAHF_LM,
1094 .features[FEAT_7_0_EBX] =
1095 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1096 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1097 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1098 .features[FEAT_XSAVE] =
1099 CPUID_XSAVE_XSAVEOPT,
1100 .xlevel = 0x8000000A,
1101 .model_id = "Intel Core Processor (Haswell, no TSX)",
1102 }, {
1103 .name = "Haswell",
1104 .level = 0xd,
1105 .vendor = CPUID_VENDOR_INTEL,
1106 .family = 6,
1107 .model = 60,
1108 .stepping = 1,
1109 .features[FEAT_1_EDX] =
1110 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1111 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1112 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1113 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1114 CPUID_DE | CPUID_FP87,
1115 .features[FEAT_1_ECX] =
1116 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1117 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1118 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1119 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1120 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1121 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1122 .features[FEAT_8000_0001_EDX] =
1123 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1124 CPUID_EXT2_SYSCALL,
1125 .features[FEAT_8000_0001_ECX] =
1126 CPUID_EXT3_LAHF_LM,
1127 .features[FEAT_7_0_EBX] =
1128 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1129 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1130 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1131 CPUID_7_0_EBX_RTM,
1132 .features[FEAT_XSAVE] =
1133 CPUID_XSAVE_XSAVEOPT,
1134 .xlevel = 0x8000000A,
1135 .model_id = "Intel Core Processor (Haswell)",
1138 .name = "Broadwell-noTSX",
1139 .level = 0xd,
1140 .vendor = CPUID_VENDOR_INTEL,
1141 .family = 6,
1142 .model = 61,
1143 .stepping = 2,
1144 .features[FEAT_1_EDX] =
1145 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1146 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1147 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1148 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1149 CPUID_DE | CPUID_FP87,
1150 .features[FEAT_1_ECX] =
1151 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1152 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1153 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1154 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1155 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1156 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1157 .features[FEAT_8000_0001_EDX] =
1158 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1159 CPUID_EXT2_SYSCALL,
1160 .features[FEAT_8000_0001_ECX] =
1161 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1162 .features[FEAT_7_0_EBX] =
1163 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1164 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1165 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1166 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1167 CPUID_7_0_EBX_SMAP,
1168 .features[FEAT_XSAVE] =
1169 CPUID_XSAVE_XSAVEOPT,
1170 .xlevel = 0x8000000A,
1171 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1174 .name = "Broadwell",
1175 .level = 0xd,
1176 .vendor = CPUID_VENDOR_INTEL,
1177 .family = 6,
1178 .model = 61,
1179 .stepping = 2,
1180 .features[FEAT_1_EDX] =
1181 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1182 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1183 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1184 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1185 CPUID_DE | CPUID_FP87,
1186 .features[FEAT_1_ECX] =
1187 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1188 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1189 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1190 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1191 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1192 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1193 .features[FEAT_8000_0001_EDX] =
1194 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1195 CPUID_EXT2_SYSCALL,
1196 .features[FEAT_8000_0001_ECX] =
1197 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1198 .features[FEAT_7_0_EBX] =
1199 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1200 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1201 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1202 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1203 CPUID_7_0_EBX_SMAP,
1204 .features[FEAT_XSAVE] =
1205 CPUID_XSAVE_XSAVEOPT,
1206 .xlevel = 0x8000000A,
1207 .model_id = "Intel Core Processor (Broadwell)",
1210 .name = "Opteron_G1",
1211 .level = 5,
1212 .vendor = CPUID_VENDOR_AMD,
1213 .family = 15,
1214 .model = 6,
1215 .stepping = 1,
1216 .features[FEAT_1_EDX] =
1217 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1218 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1219 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1220 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1221 CPUID_DE | CPUID_FP87,
1222 .features[FEAT_1_ECX] =
1223 CPUID_EXT_SSE3,
1224 .features[FEAT_8000_0001_EDX] =
1225 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1226 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1227 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1228 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1229 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1230 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1231 .xlevel = 0x80000008,
1232 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1235 .name = "Opteron_G2",
1236 .level = 5,
1237 .vendor = CPUID_VENDOR_AMD,
1238 .family = 15,
1239 .model = 6,
1240 .stepping = 1,
1241 .features[FEAT_1_EDX] =
1242 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1243 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1244 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1245 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1246 CPUID_DE | CPUID_FP87,
1247 .features[FEAT_1_ECX] =
1248 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1249 .features[FEAT_8000_0001_EDX] =
1250 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1251 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1252 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1253 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1254 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1255 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1256 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1257 .features[FEAT_8000_0001_ECX] =
1258 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1259 .xlevel = 0x80000008,
1260 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1263 .name = "Opteron_G3",
1264 .level = 5,
1265 .vendor = CPUID_VENDOR_AMD,
1266 .family = 15,
1267 .model = 6,
1268 .stepping = 1,
1269 .features[FEAT_1_EDX] =
1270 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1271 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1272 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1273 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1274 CPUID_DE | CPUID_FP87,
1275 .features[FEAT_1_ECX] =
1276 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1277 CPUID_EXT_SSE3,
1278 .features[FEAT_8000_0001_EDX] =
1279 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1280 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1281 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1282 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1283 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1284 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1285 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1286 .features[FEAT_8000_0001_ECX] =
1287 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1288 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1289 .xlevel = 0x80000008,
1290 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1293 .name = "Opteron_G4",
1294 .level = 0xd,
1295 .vendor = CPUID_VENDOR_AMD,
1296 .family = 21,
1297 .model = 1,
1298 .stepping = 2,
1299 .features[FEAT_1_EDX] =
1300 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1301 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1302 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1303 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1304 CPUID_DE | CPUID_FP87,
1305 .features[FEAT_1_ECX] =
1306 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1307 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1308 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1309 CPUID_EXT_SSE3,
1310 .features[FEAT_8000_0001_EDX] =
1311 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1312 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1313 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1314 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1315 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1316 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1317 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1318 .features[FEAT_8000_0001_ECX] =
1319 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1320 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1321 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1322 CPUID_EXT3_LAHF_LM,
1323 /* no xsaveopt! */
1324 .xlevel = 0x8000001A,
1325 .model_id = "AMD Opteron 62xx class CPU",
1328 .name = "Opteron_G5",
1329 .level = 0xd,
1330 .vendor = CPUID_VENDOR_AMD,
1331 .family = 21,
1332 .model = 2,
1333 .stepping = 0,
1334 .features[FEAT_1_EDX] =
1335 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1336 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1337 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1338 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1339 CPUID_DE | CPUID_FP87,
1340 .features[FEAT_1_ECX] =
1341 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1342 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1343 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1344 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1345 .features[FEAT_8000_0001_EDX] =
1346 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1347 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1348 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1349 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1350 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1351 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1352 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1353 .features[FEAT_8000_0001_ECX] =
1354 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1355 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1356 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1357 CPUID_EXT3_LAHF_LM,
1358 /* no xsaveopt! */
1359 .xlevel = 0x8000001A,
1360 .model_id = "AMD Opteron 63xx class CPU",
1365 * x86_cpu_compat_set_features:
1366 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1367 * @w: Identifies the feature word to be changed.
1368 * @feat_add: Feature bits to be added to feature word
1369 * @feat_remove: Feature bits to be removed from feature word
1371 * Change CPU model feature bits for compatibility.
1373 * This function may be used by machine-type compatibility functions
1374 * to enable or disable feature bits on specific CPU models.
1376 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1377 uint32_t feat_add, uint32_t feat_remove)
1379 X86CPUDefinition *def;
1380 int i;
1381 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1382 def = &builtin_x86_defs[i];
1383 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1384 def->features[w] |= feat_add;
1385 def->features[w] &= ~feat_remove;
1390 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1391 bool migratable_only);
1393 #ifdef CONFIG_KVM
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
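/* Leaves 0x80000002..0x80000004 each return 16 bytes of the processor
 * brand string, so the three iterations above fill the full 48-byte
 * model_id buffer. */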
1410 static X86CPUDefinition host_cpudef;
1412 static Property host_x86_cpu_properties[] = {
1413 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1414 DEFINE_PROP_END_OF_LIST()
1417 /* class_init for the "host" CPU model
1419 * This function may be called before KVM is initialized.
1421 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1423 DeviceClass *dc = DEVICE_CLASS(oc);
1424 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1425 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1427 xcc->kvm_required = true;
1429 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1430 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1432 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1433 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1434 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1435 host_cpudef.stepping = eax & 0x0F;
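/* Worked example: a host reporting EAX=0x000306C3 for CPUID.1 decodes,
 * with the arithmetic above, to family 6 (base 6 + extended 0),
 * model 0x3C (low nibble 0xC | extended model 0x3 << 4) and stepping 3. */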
1437 cpu_x86_fill_model_id(host_cpudef.model_id);
1439 xcc->cpu_def = &host_cpudef;
1440 host_cpudef.cache_info_passthrough = true;
1442 /* level, xlevel, xlevel2, and the feature words are initialized on
1443 * instance_init, because they require KVM to be initialized.
1446 dc->props = host_x86_cpu_properties;
1449 static void host_x86_cpu_initfn(Object *obj)
1451 X86CPU *cpu = X86_CPU(obj);
1452 CPUX86State *env = &cpu->env;
1453 KVMState *s = kvm_state;
1455 assert(kvm_enabled());
1457 /* We can't fill the features array here because we don't know yet if
1458 * "migratable" is true or false.
1460 cpu->host_features = true;
1462 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1463 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1464 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1466 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1469 static const TypeInfo host_x86_cpu_type_info = {
1470 .name = X86_CPU_TYPE_NAME("host"),
1471 .parent = TYPE_X86_CPU,
1472 .instance_init = host_x86_cpu_initfn,
1473 .class_init = host_x86_cpu_class_init,
1476 #endif
1478 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1480 FeatureWordInfo *f = &feature_word_info[w];
1481 int i;
1483 for (i = 0; i < 32; ++i) {
1484 if (1 << i & mask) {
1485 const char *reg = get_register_name_32(f->cpuid_reg);
1486 assert(reg);
1487 fprintf(stderr, "warning: %s doesn't support requested feature: "
1488 "CPUID.%02XH:%s%s%s [bit %d]\n",
1489 kvm_enabled() ? "host" : "TCG",
1490 f->cpuid_eax, reg,
1491 f->feat_names[i] ? "." : "",
1492 f->feat_names[i] ? f->feat_names[i] : "", i);
1497 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1498 const char *name, Error **errp)
1500 X86CPU *cpu = X86_CPU(obj);
1501 CPUX86State *env = &cpu->env;
1502 int64_t value;
1504 value = (env->cpuid_version >> 8) & 0xf;
1505 if (value == 0xf) {
1506 value += (env->cpuid_version >> 20) & 0xff;
1508 visit_type_int(v, &value, name, errp);
1511 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1512 const char *name, Error **errp)
1514 X86CPU *cpu = X86_CPU(obj);
1515 CPUX86State *env = &cpu->env;
1516 const int64_t min = 0;
1517 const int64_t max = 0xff + 0xf;
1518 Error *local_err = NULL;
1519 int64_t value;
1521 visit_type_int(v, &value, name, &local_err);
1522 if (local_err) {
1523 error_propagate(errp, local_err);
1524 return;
1526 if (value < min || value > max) {
1527 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1528 name ? name : "null", value, min, max);
1529 return;
1532 env->cpuid_version &= ~0xff00f00;
1533 if (value > 0x0f) {
1534 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1535 } else {
1536 env->cpuid_version |= value << 8;
1540 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1541 const char *name, Error **errp)
1543 X86CPU *cpu = X86_CPU(obj);
1544 CPUX86State *env = &cpu->env;
1545 int64_t value;
1547 value = (env->cpuid_version >> 4) & 0xf;
1548 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1549 visit_type_int(v, &value, name, errp);
1552 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1553 const char *name, Error **errp)
1555 X86CPU *cpu = X86_CPU(obj);
1556 CPUX86State *env = &cpu->env;
1557 const int64_t min = 0;
1558 const int64_t max = 0xff;
1559 Error *local_err = NULL;
1560 int64_t value;
1562 visit_type_int(v, &value, name, &local_err);
1563 if (local_err) {
1564 error_propagate(errp, local_err);
1565 return;
1567 if (value < min || value > max) {
1568 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1569 name ? name : "null", value, min, max);
1570 return;
1573 env->cpuid_version &= ~0xf00f0;
1574 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
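/* e.g. setting model=60 (0x3C) stores nibble 0xC in bits 4-7 and the
 * extended model 0x3 in bits 16-19 of cpuid_version, the inverse of the
 * getter above. */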
1577 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1578 void *opaque, const char *name,
1579 Error **errp)
1581 X86CPU *cpu = X86_CPU(obj);
1582 CPUX86State *env = &cpu->env;
1583 int64_t value;
1585 value = env->cpuid_version & 0xf;
1586 visit_type_int(v, &value, name, errp);
1589 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1590 void *opaque, const char *name,
1591 Error **errp)
1593 X86CPU *cpu = X86_CPU(obj);
1594 CPUX86State *env = &cpu->env;
1595 const int64_t min = 0;
1596 const int64_t max = 0xf;
1597 Error *local_err = NULL;
1598 int64_t value;
1600 visit_type_int(v, &value, name, &local_err);
1601 if (local_err) {
1602 error_propagate(errp, local_err);
1603 return;
1605 if (value < min || value > max) {
1606 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1607 name ? name : "null", value, min, max);
1608 return;
1611 env->cpuid_version &= ~0xf;
1612 env->cpuid_version |= value & 0xf;
1615 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1617 X86CPU *cpu = X86_CPU(obj);
1618 CPUX86State *env = &cpu->env;
1619 char *value;
1621 value = g_malloc(CPUID_VENDOR_SZ + 1);
1622 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1623 env->cpuid_vendor3);
1624 return value;
1627 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1628 Error **errp)
1630 X86CPU *cpu = X86_CPU(obj);
1631 CPUX86State *env = &cpu->env;
1632 int i;
1634 if (strlen(value) != CPUID_VENDOR_SZ) {
1635 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
1636 "vendor", value);
1637 return;
1640 env->cpuid_vendor1 = 0;
1641 env->cpuid_vendor2 = 0;
1642 env->cpuid_vendor3 = 0;
1643 for (i = 0; i < 4; i++) {
1644 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1645 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1646 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
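/* The three registers are filled little-endian, so "GenuineIntel" becomes
 * cpuid_vendor1=0x756e6547 ("Genu"), cpuid_vendor2=0x49656e69 ("ineI"),
 * cpuid_vendor3=0x6c65746e ("ntel"). */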
1650 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1652 X86CPU *cpu = X86_CPU(obj);
1653 CPUX86State *env = &cpu->env;
1654 char *value;
1655 int i;
1657 value = g_malloc(48 + 1);
1658 for (i = 0; i < 48; i++) {
1659 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1661 value[48] = '\0';
1662 return value;
1665 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1666 Error **errp)
1668 X86CPU *cpu = X86_CPU(obj);
1669 CPUX86State *env = &cpu->env;
1670 int c, len, i;
1672 if (model_id == NULL) {
1673 model_id = "";
1675 len = strlen(model_id);
1676 memset(env->cpuid_model, 0, 48);
1677 for (i = 0; i < 48; i++) {
1678 if (i >= len) {
1679 c = '\0';
1680 } else {
1681 c = (uint8_t)model_id[i];
1683 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1687 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1688 const char *name, Error **errp)
1690 X86CPU *cpu = X86_CPU(obj);
1691 int64_t value;
1693 value = cpu->env.tsc_khz * 1000;
1694 visit_type_int(v, &value, name, errp);
1697 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1698 const char *name, Error **errp)
1700 X86CPU *cpu = X86_CPU(obj);
1701 const int64_t min = 0;
1702 const int64_t max = INT64_MAX;
1703 Error *local_err = NULL;
1704 int64_t value;
1706 visit_type_int(v, &value, name, &local_err);
1707 if (local_err) {
1708 error_propagate(errp, local_err);
1709 return;
1711 if (value < min || value > max) {
1712 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1713 name ? name : "null", value, min, max);
1714 return;
1717 cpu->env.tsc_khz = value / 1000;
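/* The property value is given in Hz while env.tsc_khz is kept in kHz, so
 * e.g. "tsc-frequency=2500000000" (or tsc_freq=2.5G on the -cpu line)
 * ends up as env.tsc_khz == 2500000. */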
1720 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1721 const char *name, Error **errp)
1723 X86CPU *cpu = X86_CPU(obj);
1724 int64_t value = cpu->apic_id;
1726 visit_type_int(v, &value, name, errp);
1729 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1730 const char *name, Error **errp)
1732 X86CPU *cpu = X86_CPU(obj);
1733 DeviceState *dev = DEVICE(obj);
1734 const int64_t min = 0;
1735 const int64_t max = UINT32_MAX;
1736 Error *error = NULL;
1737 int64_t value;
1739 if (dev->realized) {
1740 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1741 "it was realized", name, object_get_typename(obj));
1742 return;
1745 visit_type_int(v, &value, name, &error);
1746 if (error) {
1747 error_propagate(errp, error);
1748 return;
1750 if (value < min || value > max) {
1751 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1752 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1753 object_get_typename(obj), name, value, min, max);
1754 return;
1757 if ((value != cpu->apic_id) && cpu_exists(value)) {
1758 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1759 return;
1761 cpu->apic_id = value;
1764 /* Generic getter for "feature-words" and "filtered-features" properties */
1765 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1766 const char *name, Error **errp)
1768 uint32_t *array = (uint32_t *)opaque;
1769 FeatureWord w;
1770 Error *err = NULL;
1771 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1772 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1773 X86CPUFeatureWordInfoList *list = NULL;
1775 for (w = 0; w < FEATURE_WORDS; w++) {
1776 FeatureWordInfo *wi = &feature_word_info[w];
1777 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1778 qwi->cpuid_input_eax = wi->cpuid_eax;
1779 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1780 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1781 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1782 qwi->features = array[w];
1784 /* List will be in reverse order, but order shouldn't matter */
1785 list_entries[w].next = list;
1786 list_entries[w].value = &word_infos[w];
1787 list = &list_entries[w];
1790 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1791 error_propagate(errp, err);
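/* Note: word_infos[] and list_entries[] above live on the stack; the list
 * is only needed while visit_type_X86CPUFeatureWordInfoList() serializes
 * it during this call, so nothing has to be allocated or freed. */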
1794 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1795 const char *name, Error **errp)
1797 X86CPU *cpu = X86_CPU(obj);
1798 int64_t value = cpu->hyperv_spinlock_attempts;
1800 visit_type_int(v, &value, name, errp);
1803 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1804 const char *name, Error **errp)
1806 const int64_t min = 0xFFF;
1807 const int64_t max = UINT_MAX;
1808 X86CPU *cpu = X86_CPU(obj);
1809 Error *err = NULL;
1810 int64_t value;
1812 visit_type_int(v, &value, name, &err);
1813 if (err) {
1814 error_propagate(errp, err);
1815 return;
1818 if (value < min || value > max) {
1819 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1820 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1821 object_get_typename(obj), name ? name : "null",
1822 value, min, max);
1823 return;
1825 cpu->hyperv_spinlock_attempts = value;
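/* Note: unlike this setter, which rejects values below 0xFFF outright,
 * the legacy feature-string parser (x86_cpu_parse_featurestr) merely
 * warns and rounds such values up to the minimum. */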
1828 static PropertyInfo qdev_prop_spinlocks = {
1829 .name = "int",
1830 .get = x86_get_hv_spinlocks,
1831 .set = x86_set_hv_spinlocks,
1834 /* Convert all '_' in a feature string option name to '-', to make the
1835  * feature name conform to the QOM property naming rule, which uses '-' instead of '_'.
1837 static inline void feat2prop(char *s)
1839 while ((s = strchr(s, '_'))) {
1840 *s = '-';
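/* e.g. a key written as "tsc_freq" on the command line ends up matching
 * the "tsc-freq" handling in x86_cpu_parse_featurestr() below. */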
1844 /* Parse "+feature,-feature,feature=foo" CPU feature string
1846 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1847 Error **errp)
1849 X86CPU *cpu = X86_CPU(cs);
1850 char *featurestr; /* Single "key=value" string being parsed */
1851 FeatureWord w;
1852 /* Features to be added */
1853 FeatureWordArray plus_features = { 0 };
1854 /* Features to be removed */
1855 FeatureWordArray minus_features = { 0 };
1856 uint32_t numvalue;
1857 CPUX86State *env = &cpu->env;
1858 Error *local_err = NULL;
1860 featurestr = features ? strtok(features, ",") : NULL;
1862 while (featurestr) {
1863 char *val;
1864 if (featurestr[0] == '+') {
1865 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1866 } else if (featurestr[0] == '-') {
1867 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1868 } else if ((val = strchr(featurestr, '='))) {
1869 *val = 0; val++;
1870 feat2prop(featurestr);
1871 if (!strcmp(featurestr, "xlevel")) {
1872 char *err;
1873 char num[32];
1875 numvalue = strtoul(val, &err, 0);
1876 if (!*val || *err) {
1877 error_setg(errp, "bad numerical value %s", val);
1878 return;
1880 if (numvalue < 0x80000000) {
1881 error_report("xlevel value shall always be >= 0x80000000"
1882 ", fixup will be removed in future versions");
1883 numvalue += 0x80000000;
1885 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1886 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1887 } else if (!strcmp(featurestr, "tsc-freq")) {
1888 int64_t tsc_freq;
1889 char *err;
1890 char num[32];
1892 tsc_freq = strtosz_suffix_unit(val, &err,
1893 STRTOSZ_DEFSUFFIX_B, 1000);
1894 if (tsc_freq < 0 || *err) {
1895 error_setg(errp, "bad numerical value %s", val);
1896 return;
1898 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1899 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1900 &local_err);
1901 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1902 char *err;
1903 const int min = 0xFFF;
1904 char num[32];
1905 numvalue = strtoul(val, &err, 0);
1906 if (!*val || *err) {
1907 error_setg(errp, "bad numerical value %s", val);
1908 return;
1910 if (numvalue < min) {
1911 error_report("hv-spinlocks value shall always be >= 0x%x"
1912 ", fixup will be removed in future versions",
1913 min);
1914 numvalue = min;
1916 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1917 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1918 } else {
1919 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1921 } else {
1922 feat2prop(featurestr);
1923 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1925 if (local_err) {
1926 error_propagate(errp, local_err);
1927 return;
1929 featurestr = strtok(NULL, ",");
1932 if (cpu->host_features) {
1933 for (w = 0; w < FEATURE_WORDS; w++) {
1934 env->features[w] =
1935 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1939 for (w = 0; w < FEATURE_WORDS; w++) {
1940 env->features[w] |= plus_features[w];
1941 env->features[w] &= ~minus_features[w];
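/* Illustrative example (hypothetical command line): with
 * "-cpu qemu64,+avx,-vme,xlevel=0x80000008,hv-spinlocks=0x1fff" the '+'
 * and '-' entries are collected into plus_features/minus_features and
 * applied after the loop (so they win over per-property settings), while
 * the key=value entries are converted into QOM property writes above. */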
1945 /* Print all cpuid feature names in featureset
1947 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1949 int bit;
1950 bool first = true;
1952 for (bit = 0; bit < 32; bit++) {
1953 if (featureset[bit]) {
1954 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1955 first = false;
1960 /* generate CPU information. */
1961 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1963 X86CPUDefinition *def;
1964 char buf[256];
1965 int i;
1967 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1968 def = &builtin_x86_defs[i];
1969 snprintf(buf, sizeof(buf), "%s", def->name);
1970 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1972 #ifdef CONFIG_KVM
1973 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1974 "KVM processor with all supported host features "
1975 "(only available in KVM mode)");
1976 #endif
1978 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1979 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1980 FeatureWordInfo *fw = &feature_word_info[i];
1982 (*cpu_fprintf)(f, " ");
1983 listflags(f, cpu_fprintf, fw->feat_names);
1984 (*cpu_fprintf)(f, "\n");
1988 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1990 CpuDefinitionInfoList *cpu_list = NULL;
1991 X86CPUDefinition *def;
1992 int i;
1994 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1995 CpuDefinitionInfoList *entry;
1996 CpuDefinitionInfo *info;
1998 def = &builtin_x86_defs[i];
1999 info = g_malloc0(sizeof(*info));
2000 info->name = g_strdup(def->name);
2002 entry = g_malloc0(sizeof(*entry));
2003 entry->value = info;
2004 entry->next = cpu_list;
2005 cpu_list = entry;
2008 return cpu_list;
2011 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2012 bool migratable_only)
2014 FeatureWordInfo *wi = &feature_word_info[w];
2015 uint32_t r;
2017 if (kvm_enabled()) {
2018 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2019 wi->cpuid_ecx,
2020 wi->cpuid_reg);
2021 } else if (tcg_enabled()) {
2022 r = wi->tcg_features;
2023 } else {
2024 return ~0;
2026 if (migratable_only) {
2027 r &= x86_cpu_get_migratable_flags(w);
2029 return r;
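/* Note: when neither KVM nor TCG is in use, ~0 is returned above, so every
 * requested feature is treated as supported and the migratable-only filter
 * is skipped. */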
2033 * Filters CPU feature words based on host availability of each feature.
2035 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2037 static int x86_cpu_filter_features(X86CPU *cpu)
2039 CPUX86State *env = &cpu->env;
2040 FeatureWord w;
2041 int rv = 0;
2043 for (w = 0; w < FEATURE_WORDS; w++) {
2044 uint32_t host_feat =
2045 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2046 uint32_t requested_features = env->features[w];
2047 env->features[w] &= host_feat;
2048 cpu->filtered_features[w] = requested_features & ~env->features[w];
2049 if (cpu->filtered_features[w]) {
2050 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2051 report_unavailable_features(w, cpu->filtered_features[w]);
2053 rv = 1;
2057 return rv;
2060 /* Load data from X86CPUDefinition
2062 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2064 CPUX86State *env = &cpu->env;
2065 const char *vendor;
2066 char host_vendor[CPUID_VENDOR_SZ + 1];
2067 FeatureWord w;
2069 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2070 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2071 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2072 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2073 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2074 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2075 cpu->cache_info_passthrough = def->cache_info_passthrough;
2076 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2077 for (w = 0; w < FEATURE_WORDS; w++) {
2078 env->features[w] = def->features[w];
2081 /* Special cases not set in the X86CPUDefinition structs: */
2082 if (kvm_enabled()) {
2083 FeatureWord w;
2084 for (w = 0; w < FEATURE_WORDS; w++) {
2085 env->features[w] |= kvm_default_features[w];
2086 env->features[w] &= ~kvm_default_unset_features[w];
2090 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
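/* CPUID_EXT_HYPERVISOR (CPUID[1].ECX bit 31) is forced on above for every
 * model so guests can detect that they are running under a hypervisor. */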
2092 /* sysenter isn't supported in compatibility mode on AMD,
2093 * syscall isn't supported in compatibility mode on Intel.
2094 * Normally we advertise the actual CPU vendor, but you can
2095 * override this using the 'vendor' property if you want to use
2096 * KVM's sysenter/syscall emulation in compatibility mode and
2097 * when doing cross vendor migration
2099 vendor = def->vendor;
2100 if (kvm_enabled()) {
2101 uint32_t ebx = 0, ecx = 0, edx = 0;
2102 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2103 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2104 vendor = host_vendor;
2107 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2111 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2113 X86CPU *cpu = NULL;
2114 X86CPUClass *xcc;
2115 ObjectClass *oc;
2116 gchar **model_pieces;
2117 char *name, *features;
2118 Error *error = NULL;
2120 model_pieces = g_strsplit(cpu_model, ",", 2);
2121 if (!model_pieces[0]) {
2122 error_setg(&error, "Invalid/empty CPU model name");
2123 goto out;
2125 name = model_pieces[0];
2126 features = model_pieces[1];
2128 oc = x86_cpu_class_by_name(name);
2129 if (oc == NULL) {
2130 error_setg(&error, "Unable to find CPU definition: %s", name);
2131 goto out;
2133 xcc = X86_CPU_CLASS(oc);
2135 if (xcc->kvm_required && !kvm_enabled()) {
2136 error_setg(&error, "CPU model '%s' requires KVM", name);
2137 goto out;
2140 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2142 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2143 if (error) {
2144 goto out;
2147 out:
2148 if (error != NULL) {
2149 error_propagate(errp, error);
2150 if (cpu) {
2151 object_unref(OBJECT(cpu));
2152 cpu = NULL;
2155 g_strfreev(model_pieces);
2156 return cpu;
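/* Note: cpu_x86_create() only creates and configures the CPU object; the
 * caller still has to set the "realized" property (as cpu_x86_init() below
 * does) before the vCPU becomes usable. */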
2159 X86CPU *cpu_x86_init(const char *cpu_model)
2161 Error *error = NULL;
2162 X86CPU *cpu;
2164 cpu = cpu_x86_create(cpu_model, &error);
2165 if (error) {
2166 goto out;
2169 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2171 out:
2172 if (error) {
2173 error_report_err(error);
2174 if (cpu != NULL) {
2175 object_unref(OBJECT(cpu));
2176 cpu = NULL;
2179 return cpu;
2182 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2184 X86CPUDefinition *cpudef = data;
2185 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2187 xcc->cpu_def = cpudef;
2190 static void x86_register_cpudef_type(X86CPUDefinition *def)
2192 char *typename = x86_cpu_type_name(def->name);
2193 TypeInfo ti = {
2194 .name = typename,
2195 .parent = TYPE_X86_CPU,
2196 .class_init = x86_cpu_cpudef_class_init,
2197 .class_data = def,
2200 type_register(&ti);
2201 g_free(typename);
2204 #if !defined(CONFIG_USER_ONLY)
2206 void cpu_clear_apic_feature(CPUX86State *env)
2208 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2211 #endif /* !CONFIG_USER_ONLY */
2213 /* Initialize list of CPU models, filling some non-static fields if necessary
2215 void x86_cpudef_setup(void)
2217 int i, j;
2218 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2220 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2221 X86CPUDefinition *def = &builtin_x86_defs[i];
2223 /* Look for specific "cpudef" models that
2224  * have the QEMU version in .model_id */
2225 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2226 if (strcmp(model_with_versions[j], def->name) == 0) {
2227 pstrcpy(def->model_id, sizeof(def->model_id),
2228 "QEMU Virtual CPU version ");
2229 pstrcat(def->model_id, sizeof(def->model_id),
2230 qemu_get_version());
2231 break;
2237 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2238 uint32_t *eax, uint32_t *ebx,
2239 uint32_t *ecx, uint32_t *edx)
2241 X86CPU *cpu = x86_env_get_cpu(env);
2242 CPUState *cs = CPU(cpu);
2244 /* test if maximum index reached */
2245 if (index & 0x80000000) {
2246 if (index > env->cpuid_xlevel) {
2247 if (env->cpuid_xlevel2 > 0) {
2248 /* Handle the Centaur's CPUID instruction. */
2249 if (index > env->cpuid_xlevel2) {
2250 index = env->cpuid_xlevel2;
2251 } else if (index < 0xC0000000) {
2252 index = env->cpuid_xlevel;
2254 } else {
2255 /* Intel documentation states that invalid EAX input will
2256 * return the same information as EAX=cpuid_level
2257 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2259 index = env->cpuid_level;
2262 } else {
2263 if (index > env->cpuid_level)
2264 index = env->cpuid_level;
2267 switch(index) {
2268 case 0:
2269 *eax = env->cpuid_level;
2270 *ebx = env->cpuid_vendor1;
2271 *edx = env->cpuid_vendor2;
2272 *ecx = env->cpuid_vendor3;
2273 break;
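/* CPUID[1].EBX layout: bits 31..24 initial APIC ID, bits 23..16 logical
 * processor count (only filled in below when cores*threads > 1, together
 * with the HTT bit in EDX), bits 15..8 CLFLUSH line size in 8-byte units
 * (8 here, i.e. 64 bytes). */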
2274 case 1:
2275 *eax = env->cpuid_version;
2276 *ebx = (cpu->apic_id << 24) |
2277 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2278 *ecx = env->features[FEAT_1_ECX];
2279 *edx = env->features[FEAT_1_EDX];
2280 if (cs->nr_cores * cs->nr_threads > 1) {
2281 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2282 *edx |= 1 << 28; /* HTT bit */
2284 break;
2285 case 2:
2286 /* cache info: needed for Pentium Pro compatibility */
2287 if (cpu->cache_info_passthrough) {
2288 host_cpuid(index, 0, eax, ebx, ecx, edx);
2289 break;
2291 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2292 *ebx = 0;
2293 *ecx = 0;
2294 *edx = (L1D_DESCRIPTOR << 16) | \
2295 (L1I_DESCRIPTOR << 8) | \
2296 (L2_DESCRIPTOR);
2297 break;
2298 case 4:
2299 /* cache info: needed for Core compatibility */
2300 if (cpu->cache_info_passthrough) {
2301 host_cpuid(index, count, eax, ebx, ecx, edx);
2302 *eax &= ~0xFC000000;
2303 } else {
2304 *eax = 0;
2305 switch (count) {
2306 case 0: /* L1 dcache info */
2307 *eax |= CPUID_4_TYPE_DCACHE | \
2308 CPUID_4_LEVEL(1) | \
2309 CPUID_4_SELF_INIT_LEVEL;
2310 *ebx = (L1D_LINE_SIZE - 1) | \
2311 ((L1D_PARTITIONS - 1) << 12) | \
2312 ((L1D_ASSOCIATIVITY - 1) << 22);
2313 *ecx = L1D_SETS - 1;
2314 *edx = CPUID_4_NO_INVD_SHARING;
2315 break;
2316 case 1: /* L1 icache info */
2317 *eax |= CPUID_4_TYPE_ICACHE | \
2318 CPUID_4_LEVEL(1) | \
2319 CPUID_4_SELF_INIT_LEVEL;
2320 *ebx = (L1I_LINE_SIZE - 1) | \
2321 ((L1I_PARTITIONS - 1) << 12) | \
2322 ((L1I_ASSOCIATIVITY - 1) << 22);
2323 *ecx = L1I_SETS - 1;
2324 *edx = CPUID_4_NO_INVD_SHARING;
2325 break;
2326 case 2: /* L2 cache info */
2327 *eax |= CPUID_4_TYPE_UNIFIED | \
2328 CPUID_4_LEVEL(2) | \
2329 CPUID_4_SELF_INIT_LEVEL;
2330 if (cs->nr_threads > 1) {
2331 *eax |= (cs->nr_threads - 1) << 14;
2333 *ebx = (L2_LINE_SIZE - 1) | \
2334 ((L2_PARTITIONS - 1) << 12) | \
2335 ((L2_ASSOCIATIVITY - 1) << 22);
2336 *ecx = L2_SETS - 1;
2337 *edx = CPUID_4_NO_INVD_SHARING;
2338 break;
2339 default: /* end of info */
2340 *eax = 0;
2341 *ebx = 0;
2342 *ecx = 0;
2343 *edx = 0;
2344 break;
2348 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2349 if ((*eax & 31) && cs->nr_cores > 1) {
2350 *eax |= (cs->nr_cores - 1) << 26;
2352 break;
2353 case 5:
2354 /* mwait info: needed for Core compatibility */
2355 *eax = 0; /* Smallest monitor-line size in bytes */
2356 *ebx = 0; /* Largest monitor-line size in bytes */
2357 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2358 *edx = 0;
2359 break;
2360 case 6:
2361 /* Thermal and Power Leaf */
2362 *eax = 0;
2363 *ebx = 0;
2364 *ecx = 0;
2365 *edx = 0;
2366 break;
2367 case 7:
2368 /* Structured Extended Feature Flags Enumeration Leaf */
2369 if (count == 0) {
2370 *eax = 0; /* Maximum ECX value for sub-leaves */
2371 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2372 *ecx = 0; /* Reserved */
2373 *edx = 0; /* Reserved */
2374 } else {
2375 *eax = 0;
2376 *ebx = 0;
2377 *ecx = 0;
2378 *edx = 0;
2380 break;
2381 case 9:
2382 /* Direct Cache Access Information Leaf */
2383 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2384 *ebx = 0;
2385 *ecx = 0;
2386 *edx = 0;
2387 break;
2388 case 0xA:
2389 /* Architectural Performance Monitoring Leaf */
2390 if (kvm_enabled() && cpu->enable_pmu) {
2391 KVMState *s = cs->kvm_state;
2393 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2394 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2395 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2396 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2397 } else {
2398 *eax = 0;
2399 *ebx = 0;
2400 *ecx = 0;
2401 *edx = 0;
2403 break;
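/* CPUID[EAX=0xD]: XSAVE state enumeration. Only populated when the guest
 * has XSAVE and KVM is in use. Sub-leaf 0 reports the XCR0 bits the guest
 * may enable (its feature words intersected with what KVM exposes) and the
 * save-area sizes; sub-leaf 1 reports FEAT_XSAVE (XSAVEOPT and friends);
 * sub-leaves >= 2 give the size and offset of each extended state area. */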
2404 case 0xD: {
2405 KVMState *s = cs->kvm_state;
2406 uint64_t kvm_mask;
2407 int i;
2409 /* Processor Extended State */
2410 *eax = 0;
2411 *ebx = 0;
2412 *ecx = 0;
2413 *edx = 0;
2414 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2415 break;
2417 kvm_mask =
2418 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2419 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2421 if (count == 0) {
2422 *ecx = 0x240;
2423 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2424 const ExtSaveArea *esa = &ext_save_areas[i];
2425 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2426 (kvm_mask & (1 << i)) != 0) {
2427 if (i < 32) {
2428 *eax |= 1 << i;
2429 } else {
2430 *edx |= 1 << (i - 32);
2432 *ecx = MAX(*ecx, esa->offset + esa->size);
2435 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2436 *ebx = *ecx;
2437 } else if (count == 1) {
2438 *eax = env->features[FEAT_XSAVE];
2439 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2440 const ExtSaveArea *esa = &ext_save_areas[count];
2441 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2442 (kvm_mask & (1 << count)) != 0) {
2443 *eax = esa->size;
2444 *ebx = esa->offset;
2447 break;
2449 case 0x80000000:
2450 *eax = env->cpuid_xlevel;
2451 *ebx = env->cpuid_vendor1;
2452 *edx = env->cpuid_vendor2;
2453 *ecx = env->cpuid_vendor3;
2454 break;
2455 case 0x80000001:
2456 *eax = env->cpuid_version;
2457 *ebx = 0;
2458 *ecx = env->features[FEAT_8000_0001_ECX];
2459 *edx = env->features[FEAT_8000_0001_EDX];
2461 /* The Linux kernel checks for the CMPLegacy bit and
2462 * discards multiple thread information if it is set.
2463 * So don't set it here for Intel to make Linux guests happy.
2465 if (cs->nr_cores * cs->nr_threads > 1) {
2466 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2467 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2468 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2469 *ecx |= 1 << 1; /* CmpLegacy bit */
2472 break;
2473 case 0x80000002:
2474 case 0x80000003:
2475 case 0x80000004:
2476 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2477 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2478 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2479 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2480 break;
2481 case 0x80000005:
2482 /* cache info (L1 cache) */
2483 if (cpu->cache_info_passthrough) {
2484 host_cpuid(index, 0, eax, ebx, ecx, edx);
2485 break;
2487 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2488 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2489 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2490 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2491 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2492 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2493 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2494 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2495 break;
2496 case 0x80000006:
2497 /* cache info (L2 cache) */
2498 if (cpu->cache_info_passthrough) {
2499 host_cpuid(index, 0, eax, ebx, ecx, edx);
2500 break;
2502 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2503 (L2_DTLB_2M_ENTRIES << 16) | \
2504 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2505 (L2_ITLB_2M_ENTRIES);
2506 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2507 (L2_DTLB_4K_ENTRIES << 16) | \
2508 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2509 (L2_ITLB_4K_ENTRIES);
2510 *ecx = (L2_SIZE_KB_AMD << 16) | \
2511 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2512 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2513 *edx = ((L3_SIZE_KB/512) << 18) | \
2514 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2515 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2516 break;
2517 case 0x80000007:
2518 *eax = 0;
2519 *ebx = 0;
2520 *ecx = 0;
2521 *edx = env->features[FEAT_8000_0007_EDX];
2522 break;
2523 case 0x80000008:
2524 /* virtual & phys address size in low 2 bytes. */
2525 /* XXX: This value must match the one used in the MMU code. */
2526 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2527 /* 64 bit processor */
2528 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2529 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2530 } else {
2531 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2532 *eax = 0x00000024; /* 36 bits physical */
2533 } else {
2534 *eax = 0x00000020; /* 32 bits physical */
2537 *ebx = 0;
2538 *ecx = 0;
2539 *edx = 0;
2540 if (cs->nr_cores * cs->nr_threads > 1) {
2541 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2543 break;
2544 case 0x8000000A:
2545 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2546 *eax = 0x00000001; /* SVM Revision */
2547 *ebx = 0x00000010; /* nr of ASIDs */
2548 *ecx = 0;
2549 *edx = env->features[FEAT_SVM]; /* optional features */
2550 } else {
2551 *eax = 0;
2552 *ebx = 0;
2553 *ecx = 0;
2554 *edx = 0;
2556 break;
2557 case 0xC0000000:
2558 *eax = env->cpuid_xlevel2;
2559 *ebx = 0;
2560 *ecx = 0;
2561 *edx = 0;
2562 break;
2563 case 0xC0000001:
2564 /* Support for VIA CPU's CPUID instruction */
2565 *eax = env->cpuid_version;
2566 *ebx = 0;
2567 *ecx = 0;
2568 *edx = env->features[FEAT_C000_0001_EDX];
2569 break;
2570 case 0xC0000002:
2571 case 0xC0000003:
2572 case 0xC0000004:
2573 /* Reserved for future use; filled with zeros for now */
2574 *eax = 0;
2575 *ebx = 0;
2576 *ecx = 0;
2577 *edx = 0;
2578 break;
2579 default:
2580 /* reserved values: zero */
2581 *eax = 0;
2582 *ebx = 0;
2583 *ecx = 0;
2584 *edx = 0;
2585 break;
2589 /* CPUClass::reset() */
2590 static void x86_cpu_reset(CPUState *s)
2592 X86CPU *cpu = X86_CPU(s);
2593 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2594 CPUX86State *env = &cpu->env;
2595 int i;
2597 xcc->parent_reset(s);
2599 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2601 tlb_flush(s, 1);
2603 env->old_exception = -1;
2605 /* init to reset state */
2607 #ifdef CONFIG_SOFTMMU
2608 env->hflags |= HF_SOFTMMU_MASK;
2609 #endif
2610 env->hflags2 |= HF2_GIF_MASK;
2612 cpu_x86_update_cr0(env, 0x60000010);
2613 env->a20_mask = ~0x0;
2614 env->smbase = 0x30000;
2616 env->idt.limit = 0xffff;
2617 env->gdt.limit = 0xffff;
2618 env->ldt.limit = 0xffff;
2619 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2620 env->tr.limit = 0xffff;
2621 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2623 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2624 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2625 DESC_R_MASK | DESC_A_MASK);
2626 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2627 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2628 DESC_A_MASK);
2629 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2630 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2631 DESC_A_MASK);
2632 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2633 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2634 DESC_A_MASK);
2635 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2636 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2637 DESC_A_MASK);
2638 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2639 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2640 DESC_A_MASK);
2642 env->eip = 0xfff0;
2643 env->regs[R_EDX] = env->cpuid_version;
2645 env->eflags = 0x2;
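/* With the CS base of 0xffff0000 loaded above and EIP = 0xfff0, execution
 * resumes at the architectural reset vector 0xFFFFFFF0; EDX carries the
 * CPUID version signature and EFLAGS has only its always-one reserved
 * bit 1 set. */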
2647 /* FPU init */
2648 for (i = 0; i < 8; i++) {
2649 env->fptags[i] = 1;
2651 cpu_set_fpuc(env, 0x37f);
2653 env->mxcsr = 0x1f80;
2654 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2656 env->pat = 0x0007040600070406ULL;
2657 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2659 memset(env->dr, 0, sizeof(env->dr));
2660 env->dr[6] = DR6_FIXED_1;
2661 env->dr[7] = DR7_FIXED_1;
2662 cpu_breakpoint_remove_all(s, BP_CPU);
2663 cpu_watchpoint_remove_all(s, BP_CPU);
2665 env->xcr0 = 1;
2668 * SDM 11.11.5 requires:
2669 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2670 * - IA32_MTRR_PHYSMASKn.V = 0
2671 * All other bits are undefined. For simplification, zero it all.
2673 env->mtrr_deftype = 0;
2674 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2675 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2677 #if !defined(CONFIG_USER_ONLY)
2678 /* We hard-wire the BSP to the first CPU. */
2679 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2681 s->halted = !cpu_is_bsp(cpu);
2683 if (kvm_enabled()) {
2684 kvm_arch_reset_vcpu(cpu);
2686 #endif
2689 #ifndef CONFIG_USER_ONLY
2690 bool cpu_is_bsp(X86CPU *cpu)
2692 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2695 /* TODO: remove me, when reset over QOM tree is implemented */
2696 static void x86_cpu_machine_reset_cb(void *opaque)
2698 X86CPU *cpu = opaque;
2699 cpu_reset(CPU(cpu));
2701 #endif
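/* Enable the MCE/MCA machinery for family >= 6 CPUs that advertise both
 * CPUID_MCE and CPUID_MCA: MCG_CAP reports MCE_BANKS_DEF banks and every
 * bank control register is initialized to all ones, enabling all error
 * types. */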
2703 static void mce_init(X86CPU *cpu)
2705 CPUX86State *cenv = &cpu->env;
2706 unsigned int bank;
2708 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2709 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2710 (CPUID_MCE | CPUID_MCA)) {
2711 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2712 cenv->mcg_ctl = ~(uint64_t)0;
2713 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2714 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2719 #ifndef CONFIG_USER_ONLY
2720 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2722 DeviceState *dev = DEVICE(cpu);
2723 APICCommonState *apic;
2724 const char *apic_type = "apic";
2726 if (kvm_irqchip_in_kernel()) {
2727 apic_type = "kvm-apic";
2728 } else if (xen_enabled()) {
2729 apic_type = "xen-apic";
2732 cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
2733 if (cpu->apic_state == NULL) {
2734 error_setg(errp, "APIC device '%s' could not be created", apic_type);
2735 return;
2738 object_property_add_child(OBJECT(cpu), "apic",
2739 OBJECT(cpu->apic_state), NULL);
2740 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2741 /* TODO: convert to link<> */
2742 apic = APIC_COMMON(cpu->apic_state);
2743 apic->cpu = cpu;
2746 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2748 if (cpu->apic_state == NULL) {
2749 return;
2751 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2752 errp);
2755 static void x86_cpu_machine_done(Notifier *n, void *unused)
2757 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2758 MemoryRegion *smram =
2759 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
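/* If the machine exposes an SMRAM region, alias it into this CPU's address
 * space above normal RAM (priority 1); the alias starts out disabled. */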
2761 if (smram) {
2762 cpu->smram = g_new(MemoryRegion, 1);
2763 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2764 smram, 0, 1ull << 32);
2765 memory_region_set_enabled(cpu->smram, false);
2766 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2769 #else
2770 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2773 #endif
2776 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2777 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2778 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2779 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2780 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2781 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2782 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2784 CPUState *cs = CPU(dev);
2785 X86CPU *cpu = X86_CPU(dev);
2786 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2787 CPUX86State *env = &cpu->env;
2788 Error *local_err = NULL;
2789 static bool ht_warned;
2791 if (cpu->apic_id < 0) {
2792 error_setg(errp, "apic-id property was not initialized properly");
2793 return;
2796 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2797 env->cpuid_level = 7;
2800 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2801 * CPUID[1].EDX.
2803 if (IS_AMD_CPU(env)) {
2804 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2805 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2806 & CPUID_EXT2_AMD_ALIASES);
2810 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2811 error_setg(&local_err,
2812 kvm_enabled() ?
2813 "Host doesn't support requested features" :
2814 "TCG doesn't support requested features");
2815 goto out;
2818 #ifndef CONFIG_USER_ONLY
2819 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2821 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2822 x86_cpu_apic_create(cpu, &local_err);
2823 if (local_err != NULL) {
2824 goto out;
2827 #endif
2829 mce_init(cpu);
2831 #ifndef CONFIG_USER_ONLY
2832 if (tcg_enabled()) {
2833 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2834 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2835 cs->as = g_new(AddressSpace, 1);
2837 /* Outer container... */
2838 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2839 memory_region_set_enabled(cpu->cpu_as_root, true);
2841 /* ... with two regions inside: normal system memory with low
2842 * priority, and...
2844 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2845 get_system_memory(), 0, ~0ull);
2846 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2847 memory_region_set_enabled(cpu->cpu_as_mem, true);
2848 address_space_init(cs->as, cpu->cpu_as_root, "CPU");
2850 /* ... SMRAM with higher priority, linked from /machine/smram. */
2851 cpu->machine_done.notify = x86_cpu_machine_done;
2852 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2854 #endif
2856 qemu_init_vcpu(cs);
2858 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2859 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2860 * based on inputs (sockets, cores, threads), it is still better to give
2861 * users a warning.
2863 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2864 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
2866 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2867 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2868 " -smp options properly.");
2869 ht_warned = true;
2872 x86_cpu_apic_realize(cpu, &local_err);
2873 if (local_err != NULL) {
2874 goto out;
2876 cpu_reset(cs);
2878 xcc->parent_realize(dev, &local_err);
2880 out:
2881 if (local_err != NULL) {
2882 error_propagate(errp, local_err);
2883 return;
2887 typedef struct BitProperty {
2888 uint32_t *ptr;
2889 uint32_t mask;
2890 } BitProperty;
2892 static void x86_cpu_get_bit_prop(Object *obj,
2893 struct Visitor *v,
2894 void *opaque,
2895 const char *name,
2896 Error **errp)
2898 BitProperty *fp = opaque;
2899 bool value = (*fp->ptr & fp->mask) == fp->mask;
2900 visit_type_bool(v, &value, name, errp);
2903 static void x86_cpu_set_bit_prop(Object *obj,
2904 struct Visitor *v,
2905 void *opaque,
2906 const char *name,
2907 Error **errp)
2909 DeviceState *dev = DEVICE(obj);
2910 BitProperty *fp = opaque;
2911 Error *local_err = NULL;
2912 bool value;
2914 if (dev->realized) {
2915 qdev_prop_set_after_realize(dev, name, errp);
2916 return;
2919 visit_type_bool(v, &value, name, &local_err);
2920 if (local_err) {
2921 error_propagate(errp, local_err);
2922 return;
2925 if (value) {
2926 *fp->ptr |= fp->mask;
2927 } else {
2928 *fp->ptr &= ~fp->mask;
2932 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
2933 void *opaque)
2935 BitProperty *prop = opaque;
2936 g_free(prop);
2939 /* Register a boolean property to get/set a single bit in a uint32_t field.
2941 * The same property name can be registered multiple times to make it affect
2942 * multiple bits in the same FeatureWord. In that case, the getter will return
2943 * true only if all bits are set.
2945 static void x86_cpu_register_bit_prop(X86CPU *cpu,
2946 const char *prop_name,
2947 uint32_t *field,
2948 int bitnr)
2950 BitProperty *fp;
2951 ObjectProperty *op;
2952 uint32_t mask = (1UL << bitnr);
2954 op = object_property_find(OBJECT(cpu), prop_name, NULL);
2955 if (op) {
2956 fp = op->opaque;
2957 assert(fp->ptr == field);
2958 fp->mask |= mask;
2959 } else {
2960 fp = g_new0(BitProperty, 1);
2961 fp->ptr = field;
2962 fp->mask = mask;
2963 object_property_add(OBJECT(cpu), prop_name, "bool",
2964 x86_cpu_get_bit_prop,
2965 x86_cpu_set_bit_prop,
2966 x86_cpu_release_bit_prop, fp, &error_abort);
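/* Create the QOM bit properties for feature word w, bit bitnr. feat_names
 * entries may contain several '|'-separated spellings: the first one
 * (with '_' converted to '-') becomes the canonical bool property and the
 * remaining spellings are registered as aliases to it. */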
2970 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
2971 FeatureWord w,
2972 int bitnr)
2974 Object *obj = OBJECT(cpu);
2975 int i;
2976 char **names;
2977 FeatureWordInfo *fi = &feature_word_info[w];
2979 if (!fi->feat_names) {
2980 return;
2982 if (!fi->feat_names[bitnr]) {
2983 return;
2986 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
2988 feat2prop(names[0]);
2989 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
2991 for (i = 1; names[i]; i++) {
2992 feat2prop(names[i]);
2993 object_property_add_alias(obj, names[i], obj, g_strdup(names[0]),
2994 &error_abort);
2997 g_strfreev(names);
3000 static void x86_cpu_initfn(Object *obj)
3002 CPUState *cs = CPU(obj);
3003 X86CPU *cpu = X86_CPU(obj);
3004 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3005 CPUX86State *env = &cpu->env;
3006 FeatureWord w;
3007 static int inited;
3009 cs->env_ptr = env;
3010 cpu_exec_init(env);
3012 object_property_add(obj, "family", "int",
3013 x86_cpuid_version_get_family,
3014 x86_cpuid_version_set_family, NULL, NULL, NULL);
3015 object_property_add(obj, "model", "int",
3016 x86_cpuid_version_get_model,
3017 x86_cpuid_version_set_model, NULL, NULL, NULL);
3018 object_property_add(obj, "stepping", "int",
3019 x86_cpuid_version_get_stepping,
3020 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3021 object_property_add_str(obj, "vendor",
3022 x86_cpuid_get_vendor,
3023 x86_cpuid_set_vendor, NULL);
3024 object_property_add_str(obj, "model-id",
3025 x86_cpuid_get_model_id,
3026 x86_cpuid_set_model_id, NULL);
3027 object_property_add(obj, "tsc-frequency", "int",
3028 x86_cpuid_get_tsc_freq,
3029 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3030 object_property_add(obj, "apic-id", "int",
3031 x86_cpuid_get_apic_id,
3032 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3033 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3034 x86_cpu_get_feature_words,
3035 NULL, NULL, (void *)env->features, NULL);
3036 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3037 x86_cpu_get_feature_words,
3038 NULL, NULL, (void *)cpu->filtered_features, NULL);
3040 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3042 #ifndef CONFIG_USER_ONLY
3043 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3044 cpu->apic_id = -1;
3045 #endif
3047 for (w = 0; w < FEATURE_WORDS; w++) {
3048 int bitnr;
3050 for (bitnr = 0; bitnr < 32; bitnr++) {
3051 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3055 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3057 /* init various static tables used in TCG mode */
3058 if (tcg_enabled() && !inited) {
3059 inited = 1;
3060 optimize_flags_init();
3064 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3066 X86CPU *cpu = X86_CPU(cs);
3068 return cpu->apic_id;
3071 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3073 X86CPU *cpu = X86_CPU(cs);
3075 return cpu->env.cr[0] & CR0_PG_MASK;
3078 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3080 X86CPU *cpu = X86_CPU(cs);
3082 cpu->env.eip = value;
3085 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3087 X86CPU *cpu = X86_CPU(cs);
3089 cpu->env.eip = tb->pc - tb->cs_base;
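/* The CPU has work when a hard interrupt is pending and IF is set, when an
 * NMI/INIT/SIPI/MCE is pending, or when an SMI is pending outside SMM; a
 * pending CPU_INTERRUPT_POLL is first folded into the APIC. */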
3092 static bool x86_cpu_has_work(CPUState *cs)
3094 X86CPU *cpu = X86_CPU(cs);
3095 CPUX86State *env = &cpu->env;
3097 #if !defined(CONFIG_USER_ONLY)
3098 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
3099 apic_poll_irq(cpu->apic_state);
3100 cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
3102 #endif
3104 return ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
3105 (env->eflags & IF_MASK)) ||
3106 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3107 CPU_INTERRUPT_INIT |
3108 CPU_INTERRUPT_SIPI |
3109 CPU_INTERRUPT_MCE)) ||
3110 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3111 !(env->hflags & HF_SMM_MASK));
3114 static Property x86_cpu_properties[] = {
3115 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3116 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3117 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3118 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3119 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3120 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
3121 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3122 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3123 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3124 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3125 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3126 DEFINE_PROP_END_OF_LIST()
3129 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3131 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3132 CPUClass *cc = CPU_CLASS(oc);
3133 DeviceClass *dc = DEVICE_CLASS(oc);
3135 xcc->parent_realize = dc->realize;
3136 dc->realize = x86_cpu_realizefn;
3137 dc->bus_type = TYPE_ICC_BUS;
3138 dc->props = x86_cpu_properties;
3140 xcc->parent_reset = cc->reset;
3141 cc->reset = x86_cpu_reset;
3142 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3144 cc->class_by_name = x86_cpu_class_by_name;
3145 cc->parse_features = x86_cpu_parse_featurestr;
3146 cc->has_work = x86_cpu_has_work;
3147 cc->do_interrupt = x86_cpu_do_interrupt;
3148 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3149 cc->dump_state = x86_cpu_dump_state;
3150 cc->set_pc = x86_cpu_set_pc;
3151 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3152 cc->gdb_read_register = x86_cpu_gdb_read_register;
3153 cc->gdb_write_register = x86_cpu_gdb_write_register;
3154 cc->get_arch_id = x86_cpu_get_arch_id;
3155 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3156 #ifdef CONFIG_USER_ONLY
3157 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3158 #else
3159 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3160 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3161 cc->write_elf64_note = x86_cpu_write_elf64_note;
3162 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3163 cc->write_elf32_note = x86_cpu_write_elf32_note;
3164 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3165 cc->vmsd = &vmstate_x86_cpu;
3166 #endif
3167 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3168 #ifndef CONFIG_USER_ONLY
3169 cc->debug_excp_handler = breakpoint_handler;
3170 #endif
3171 cc->cpu_exec_enter = x86_cpu_exec_enter;
3172 cc->cpu_exec_exit = x86_cpu_exec_exit;
3175 static const TypeInfo x86_cpu_type_info = {
3176 .name = TYPE_X86_CPU,
3177 .parent = TYPE_CPU,
3178 .instance_size = sizeof(X86CPU),
3179 .instance_init = x86_cpu_initfn,
3180 .abstract = true,
3181 .class_size = sizeof(X86CPUClass),
3182 .class_init = x86_cpu_common_class_init,
3185 static void x86_cpu_register_types(void)
3187 int i;
3189 type_register_static(&x86_cpu_type_info);
3190 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3191 x86_register_cpudef_type(&builtin_x86_defs[i]);
3193 #ifdef CONFIG_KVM
3194 type_register_static(&host_x86_cpu_type_info);
3195 #endif
3198 type_init(x86_cpu_register_types)