MAINTAINERS: add rocker
[qemu/ar7.git] / target-i386 / cpu.c
blob3305e094138a52cec66f54722f8e920ba3d65a8c
/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #include "hw/hw.h"
39 #if defined(CONFIG_KVM)
40 #include <linux/kvm_para.h>
41 #endif
43 #include "sysemu/sysemu.h"
44 #include "hw/qdev-properties.h"
45 #include "hw/cpu/icc_bus.h"
46 #ifndef CONFIG_USER_ONLY
47 #include "hw/xen/xen.h"
48 #include "hw/i386/apic_internal.h"
49 #endif
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d


/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

/* Cache level is reported in EAX bits 7:5 */
#define CPUID_4_LEVEL(l)          ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)
/* Marker for a fully-associative cache in the AMD CPUID leaves */
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a raw way count to the 4-bit encoding defined by the AMD
 * CPUID specification; 0 and 1 encode themselves, unsupported
 * way counts encode as 0 (invalid).
 * NOTE: the parameter is evaluated multiple times — do not pass
 * expressions with side effects.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)   : \
                          (a) ==   2 ? 0x2   : \
                          (a) ==   4 ? 0x4   : \
                          (a) ==   8 ? 0x6   : \
                          (a) ==  16 ? 0x8   : \
                          (a) ==  32 ? 0xA   : \
                          (a) ==  48 ? 0xB   : \
                          (a) ==  64 ? 0xC   : \
                          (a) ==  96 ? 0xD   : \
                          (a) == 128 ? 0xE   : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* No L3 cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
163 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
164 uint32_t vendor2, uint32_t vendor3)
166 int i;
167 for (i = 0; i < 4; i++) {
168 dst[i] = vendor1 >> (8 * i);
169 dst[i + 4] = vendor2 >> (8 * i);
170 dst[i + 8] = vendor3 >> (8 * i);
172 dst[CPUID_VENDOR_SZ] = '\0';
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
/* Names for CPUID[1].EDX feature bits; array index == bit position */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* Names for CPUID[1].ECX feature bits; "a|b" entries are accepted aliases */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};
/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[].  They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};
/* Names for CPUID[8000_0001].ECX feature bits */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Names for CPUID[C000_0001].EDX feature bits (VIA/Centaur PadLock) */
static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Names for KVM paravirt feature bits (CPUID leaf KVM_CPUID_FEATURES).
 * "kvmclock" intentionally appears twice: bits 0 and 3 both enable a
 * kvmclock MSR pair. */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Names for CPUID[8000_000A].EDX (SVM) feature bits */
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean",  "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Names for CPUID[EAX=7,ECX=0].EBX feature bits */
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
    "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
    NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};
/* Names for CPUID[8000_0007].EDX (APM) feature bits; only invtsc (bit 8)
 * is exposed */
static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Names for CPUID[EAX=0xd,ECX=1].EAX (XSAVE capability) feature bits */
static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Baseline feature sets for the generic legacy CPU models below */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* Feature flags the TCG emulation backend can actually implement;
 * anything outside these masks is filtered out when running under TCG. */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
          CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
          CPUID_EXT_RDRAND */

/* 64-bit-only flags are advertised only when the target supports long mode */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
          /* missing:
          CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_APM_FEATURES 0
/* Describes one 32-bit CPUID feature word: where it comes from in CPUID
 * space, its per-bit names, and which bits TCG supports / cannot migrate. */
typedef struct FeatureWordInfo {
    const char **feat_names;       /* bit-indexed feature names (NULL = unknown bit) */
    uint32_t cpuid_eax;            /* Input EAX for CPUID */
    bool cpuid_needs_ecx;          /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;            /* Input ECX value for CPUID */
    int cpuid_reg;                 /* output register (R_* constant) */
    uint32_t tcg_features;         /* Feature flags supported by TCG */
    uint32_t unmigratable_flags;   /* Feature flags known to be unmigratable */
} FeatureWordInfo;
354 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
355 [FEAT_1_EDX] = {
356 .feat_names = feature_name,
357 .cpuid_eax = 1, .cpuid_reg = R_EDX,
358 .tcg_features = TCG_FEATURES,
360 [FEAT_1_ECX] = {
361 .feat_names = ext_feature_name,
362 .cpuid_eax = 1, .cpuid_reg = R_ECX,
363 .tcg_features = TCG_EXT_FEATURES,
365 [FEAT_8000_0001_EDX] = {
366 .feat_names = ext2_feature_name,
367 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
368 .tcg_features = TCG_EXT2_FEATURES,
370 [FEAT_8000_0001_ECX] = {
371 .feat_names = ext3_feature_name,
372 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
373 .tcg_features = TCG_EXT3_FEATURES,
375 [FEAT_C000_0001_EDX] = {
376 .feat_names = ext4_feature_name,
377 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
378 .tcg_features = TCG_EXT4_FEATURES,
380 [FEAT_KVM] = {
381 .feat_names = kvm_feature_name,
382 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
383 .tcg_features = TCG_KVM_FEATURES,
385 [FEAT_SVM] = {
386 .feat_names = svm_feature_name,
387 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
388 .tcg_features = TCG_SVM_FEATURES,
390 [FEAT_7_0_EBX] = {
391 .feat_names = cpuid_7_0_ebx_feature_name,
392 .cpuid_eax = 7,
393 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
394 .cpuid_reg = R_EBX,
395 .tcg_features = TCG_7_0_EBX_FEATURES,
397 [FEAT_8000_0007_EDX] = {
398 .feat_names = cpuid_apm_edx_feature_name,
399 .cpuid_eax = 0x80000007,
400 .cpuid_reg = R_EDX,
401 .tcg_features = TCG_APM_FEATURES,
402 .unmigratable_flags = CPUID_APM_INVTSC,
404 [FEAT_XSAVE] = {
405 .feat_names = cpuid_xsave_feature_name,
406 .cpuid_eax = 0xd,
407 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
408 .cpuid_reg = R_EAX,
409 .tcg_features = 0,
413 typedef struct X86RegisterInfo32 {
414 /* Name of register */
415 const char *name;
416 /* QAPI enum value register */
417 X86CPURegister32 qapi_enum;
418 } X86RegisterInfo32;
420 #define REGISTER(reg) \
421 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
422 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
423 REGISTER(EAX),
424 REGISTER(ECX),
425 REGISTER(EDX),
426 REGISTER(EBX),
427 REGISTER(ESP),
428 REGISTER(EBP),
429 REGISTER(ESI),
430 REGISTER(EDI),
432 #undef REGISTER
/* One XSAVE extended state component: the feature word/bits that enable
 * it and its offset/size within the XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;
439 static const ExtSaveArea ext_save_areas[] = {
440 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
441 .offset = 0x240, .size = 0x100 },
442 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
443 .offset = 0x3c0, .size = 0x40 },
444 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
445 .offset = 0x400, .size = 0x40 },
446 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
447 .offset = 0x440, .size = 0x40 },
448 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
449 .offset = 0x480, .size = 0x200 },
450 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
451 .offset = 0x680, .size = 0x400 },
454 const char *get_register_name_32(unsigned int reg)
456 if (reg >= CPU_NB_REGS32) {
457 return NULL;
459 return x86_reg_info_32[reg].name;
462 /* KVM-specific features that are automatically added to all CPU models
463 * when KVM is enabled.
465 static uint32_t kvm_default_features[FEATURE_WORDS] = {
466 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
467 (1 << KVM_FEATURE_NOP_IO_DELAY) |
468 (1 << KVM_FEATURE_CLOCKSOURCE2) |
469 (1 << KVM_FEATURE_ASYNC_PF) |
470 (1 << KVM_FEATURE_STEAL_TIME) |
471 (1 << KVM_FEATURE_PV_EOI) |
472 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
473 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
476 /* Features that are not added by default to any CPU model when KVM is enabled.
478 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
479 [FEAT_1_EDX] = CPUID_ACPI,
480 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
481 [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
484 void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
486 kvm_default_features[w] &= ~features;
489 void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
491 kvm_default_unset_features[w] &= ~features;
495 * Returns the set of feature flags that are supported and migratable by
496 * QEMU, for a given FeatureWord.
498 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
500 FeatureWordInfo *wi = &feature_word_info[w];
501 uint32_t r = 0;
502 int i;
504 for (i = 0; i < 32; i++) {
505 uint32_t f = 1U << i;
506 /* If the feature name is unknown, it is not supported by QEMU yet */
507 if (!wi->feat_names[i]) {
508 continue;
510 /* Skip features known to QEMU, but explicitly marked as unmigratable */
511 if (wi->unmigratable_flags & f) {
512 continue;
514 r |= f;
516 return r;
519 void host_cpuid(uint32_t function, uint32_t count,
520 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
522 uint32_t vec[4];
524 #ifdef __x86_64__
525 asm volatile("cpuid"
526 : "=a"(vec[0]), "=b"(vec[1]),
527 "=c"(vec[2]), "=d"(vec[3])
528 : "0"(function), "c"(count) : "cc");
529 #elif defined(__i386__)
530 asm volatile("pusha \n\t"
531 "cpuid \n\t"
532 "mov %%eax, 0(%2) \n\t"
533 "mov %%ebx, 4(%2) \n\t"
534 "mov %%ecx, 8(%2) \n\t"
535 "mov %%edx, 12(%2) \n\t"
536 "popa"
537 : : "a"(function), "c"(count), "S"(vec)
538 : "memory", "cc");
539 #else
540 abort();
541 #endif
543 if (eax)
544 *eax = vec[0];
545 if (ebx)
546 *ebx = vec[1];
547 if (ecx)
548 *ecx = vec[2];
549 if (edx)
550 *edx = vec[3];
/* True for a non-NUL byte that is not a printable ASCII character above
 * space (i.e. c <= ' ' or c > '~'); NUL itself is excluded. */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
/* general substring compare of *[s1..e1) and *[s2..e2).  sx is start of
 * a substring.  ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to be sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    for (;;) {
        if (!*s1 || !*s2 || *s1 != *s2) {
            return (*s1 - *s2);
        }
        ++s1, ++s2;
        if (s1 == e1 && s2 == e2) {
            return (0);
        } else if (s1 == e1) {
            /* s1 exhausted first: order by the remaining char of s2 */
            return (*s2);
        } else if (s2 == e2) {
            return (*s1);
        }
    }
}
/* compare *[s..e) to *altstr.  *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right.  Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *p, *q;

    for (q = p = altstr; ; ) {
        /* advance p to the end of the current alternative */
        while (*p && *p != '|') {
            ++p;
        }
        /* empty alternative matches empty input; otherwise compare */
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p))) {
            return (0);
        }
        if (!*p) {
            return (1);
        } else {
            q = ++p;
        }
    }
}
/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false.
 * All matching entries (aliases included) have their bits set.
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    uint32_t mask;
    const char **ppc;
    bool found = false;

    for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
        if (*ppc && !altcmp(s, e, *ppc)) {
            *pval |= mask;
            found = true;
        }
    }
    return found;
}
616 static void add_flagname_to_bitmaps(const char *flagname,
617 FeatureWordArray words,
618 Error **errp)
620 FeatureWord w;
621 for (w = 0; w < FEATURE_WORDS; w++) {
622 FeatureWordInfo *wi = &feature_word_info[w];
623 if (wi->feat_names &&
624 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
625 break;
628 if (w == FEATURE_WORDS) {
629 error_setg(errp, "CPU feature %s not found", flagname);
633 /* CPU class name definitions: */
635 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
636 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
638 /* Return type name for a given CPU model name
639 * Caller is responsible for freeing the returned string.
641 static char *x86_cpu_type_name(const char *model_name)
643 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
646 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
648 ObjectClass *oc;
649 char *typename;
651 if (cpu_model == NULL) {
652 return NULL;
655 typename = x86_cpu_type_name(cpu_model);
656 oc = object_class_by_name(typename);
657 g_free(typename);
658 return oc;
661 struct X86CPUDefinition {
662 const char *name;
663 uint32_t level;
664 uint32_t xlevel;
665 uint32_t xlevel2;
666 /* vendor is zero-terminated, 12 character ASCII string */
667 char vendor[CPUID_VENDOR_SZ + 1];
668 int family;
669 int model;
670 int stepping;
671 FeatureWordArray features;
672 char model_id[48];
673 bool cache_info_passthrough;
676 static X86CPUDefinition builtin_x86_defs[] = {
678 .name = "qemu64",
679 .level = 4,
680 .vendor = CPUID_VENDOR_AMD,
681 .family = 6,
682 .model = 6,
683 .stepping = 3,
684 .features[FEAT_1_EDX] =
685 PPRO_FEATURES |
686 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
687 CPUID_PSE36,
688 .features[FEAT_1_ECX] =
689 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
690 .features[FEAT_8000_0001_EDX] =
691 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
692 .features[FEAT_8000_0001_ECX] =
693 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
694 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
695 .xlevel = 0x8000000A,
698 .name = "phenom",
699 .level = 5,
700 .vendor = CPUID_VENDOR_AMD,
701 .family = 16,
702 .model = 2,
703 .stepping = 3,
704 /* Missing: CPUID_HT */
705 .features[FEAT_1_EDX] =
706 PPRO_FEATURES |
707 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
708 CPUID_PSE36 | CPUID_VME,
709 .features[FEAT_1_ECX] =
710 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
711 CPUID_EXT_POPCNT,
712 .features[FEAT_8000_0001_EDX] =
713 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
714 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
715 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
716 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
717 CPUID_EXT3_CR8LEG,
718 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
719 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
720 .features[FEAT_8000_0001_ECX] =
721 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
722 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
723 /* Missing: CPUID_SVM_LBRV */
724 .features[FEAT_SVM] =
725 CPUID_SVM_NPT,
726 .xlevel = 0x8000001A,
727 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
730 .name = "core2duo",
731 .level = 10,
732 .vendor = CPUID_VENDOR_INTEL,
733 .family = 6,
734 .model = 15,
735 .stepping = 11,
736 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
737 .features[FEAT_1_EDX] =
738 PPRO_FEATURES |
739 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
740 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
741 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
742 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
743 .features[FEAT_1_ECX] =
744 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
745 CPUID_EXT_CX16,
746 .features[FEAT_8000_0001_EDX] =
747 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
748 .features[FEAT_8000_0001_ECX] =
749 CPUID_EXT3_LAHF_LM,
750 .xlevel = 0x80000008,
751 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
754 .name = "kvm64",
755 .level = 5,
756 .vendor = CPUID_VENDOR_INTEL,
757 .family = 15,
758 .model = 6,
759 .stepping = 1,
760 /* Missing: CPUID_HT */
761 .features[FEAT_1_EDX] =
762 PPRO_FEATURES | CPUID_VME |
763 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
764 CPUID_PSE36,
765 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
766 .features[FEAT_1_ECX] =
767 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
768 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
769 .features[FEAT_8000_0001_EDX] =
770 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
771 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
772 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
773 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
774 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
775 .features[FEAT_8000_0001_ECX] =
777 .xlevel = 0x80000008,
778 .model_id = "Common KVM processor"
781 .name = "qemu32",
782 .level = 4,
783 .vendor = CPUID_VENDOR_INTEL,
784 .family = 6,
785 .model = 6,
786 .stepping = 3,
787 .features[FEAT_1_EDX] =
788 PPRO_FEATURES,
789 .features[FEAT_1_ECX] =
790 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
791 .xlevel = 0x80000004,
794 .name = "kvm32",
795 .level = 5,
796 .vendor = CPUID_VENDOR_INTEL,
797 .family = 15,
798 .model = 6,
799 .stepping = 1,
800 .features[FEAT_1_EDX] =
801 PPRO_FEATURES | CPUID_VME |
802 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
803 .features[FEAT_1_ECX] =
804 CPUID_EXT_SSE3,
805 .features[FEAT_8000_0001_ECX] =
807 .xlevel = 0x80000008,
808 .model_id = "Common 32-bit KVM processor"
811 .name = "coreduo",
812 .level = 10,
813 .vendor = CPUID_VENDOR_INTEL,
814 .family = 6,
815 .model = 14,
816 .stepping = 8,
817 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
818 .features[FEAT_1_EDX] =
819 PPRO_FEATURES | CPUID_VME |
820 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
821 CPUID_SS,
822 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
823 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
824 .features[FEAT_1_ECX] =
825 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
826 .features[FEAT_8000_0001_EDX] =
827 CPUID_EXT2_NX,
828 .xlevel = 0x80000008,
829 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
832 .name = "486",
833 .level = 1,
834 .vendor = CPUID_VENDOR_INTEL,
835 .family = 4,
836 .model = 8,
837 .stepping = 0,
838 .features[FEAT_1_EDX] =
839 I486_FEATURES,
840 .xlevel = 0,
843 .name = "pentium",
844 .level = 1,
845 .vendor = CPUID_VENDOR_INTEL,
846 .family = 5,
847 .model = 4,
848 .stepping = 3,
849 .features[FEAT_1_EDX] =
850 PENTIUM_FEATURES,
851 .xlevel = 0,
854 .name = "pentium2",
855 .level = 2,
856 .vendor = CPUID_VENDOR_INTEL,
857 .family = 6,
858 .model = 5,
859 .stepping = 2,
860 .features[FEAT_1_EDX] =
861 PENTIUM2_FEATURES,
862 .xlevel = 0,
865 .name = "pentium3",
866 .level = 2,
867 .vendor = CPUID_VENDOR_INTEL,
868 .family = 6,
869 .model = 7,
870 .stepping = 3,
871 .features[FEAT_1_EDX] =
872 PENTIUM3_FEATURES,
873 .xlevel = 0,
876 .name = "athlon",
877 .level = 2,
878 .vendor = CPUID_VENDOR_AMD,
879 .family = 6,
880 .model = 2,
881 .stepping = 3,
882 .features[FEAT_1_EDX] =
883 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
884 CPUID_MCA,
885 .features[FEAT_8000_0001_EDX] =
886 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
887 .xlevel = 0x80000008,
890 .name = "n270",
891 /* original is on level 10 */
892 .level = 5,
893 .vendor = CPUID_VENDOR_INTEL,
894 .family = 6,
895 .model = 28,
896 .stepping = 2,
897 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
898 .features[FEAT_1_EDX] =
899 PPRO_FEATURES |
900 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
901 CPUID_ACPI | CPUID_SS,
902 /* Some CPUs got no CPUID_SEP */
903 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
904 * CPUID_EXT_XTPR */
905 .features[FEAT_1_ECX] =
906 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
907 CPUID_EXT_MOVBE,
908 .features[FEAT_8000_0001_EDX] =
909 CPUID_EXT2_NX,
910 .features[FEAT_8000_0001_ECX] =
911 CPUID_EXT3_LAHF_LM,
912 .xlevel = 0x8000000A,
913 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
916 .name = "Conroe",
917 .level = 4,
918 .vendor = CPUID_VENDOR_INTEL,
919 .family = 6,
920 .model = 15,
921 .stepping = 3,
922 .features[FEAT_1_EDX] =
923 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
924 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
925 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
926 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
927 CPUID_DE | CPUID_FP87,
928 .features[FEAT_1_ECX] =
929 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
930 .features[FEAT_8000_0001_EDX] =
931 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
932 .features[FEAT_8000_0001_ECX] =
933 CPUID_EXT3_LAHF_LM,
934 .xlevel = 0x8000000A,
935 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
938 .name = "Penryn",
939 .level = 4,
940 .vendor = CPUID_VENDOR_INTEL,
941 .family = 6,
942 .model = 23,
943 .stepping = 3,
944 .features[FEAT_1_EDX] =
945 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
946 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
947 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
948 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
949 CPUID_DE | CPUID_FP87,
950 .features[FEAT_1_ECX] =
951 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
952 CPUID_EXT_SSE3,
953 .features[FEAT_8000_0001_EDX] =
954 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
955 .features[FEAT_8000_0001_ECX] =
956 CPUID_EXT3_LAHF_LM,
957 .xlevel = 0x8000000A,
958 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
961 .name = "Nehalem",
962 .level = 4,
963 .vendor = CPUID_VENDOR_INTEL,
964 .family = 6,
965 .model = 26,
966 .stepping = 3,
967 .features[FEAT_1_EDX] =
968 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
969 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
970 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
971 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
972 CPUID_DE | CPUID_FP87,
973 .features[FEAT_1_ECX] =
974 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
975 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
976 .features[FEAT_8000_0001_EDX] =
977 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
978 .features[FEAT_8000_0001_ECX] =
979 CPUID_EXT3_LAHF_LM,
980 .xlevel = 0x8000000A,
981 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
984 .name = "Westmere",
985 .level = 11,
986 .vendor = CPUID_VENDOR_INTEL,
987 .family = 6,
988 .model = 44,
989 .stepping = 1,
990 .features[FEAT_1_EDX] =
991 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
992 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
993 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
994 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
995 CPUID_DE | CPUID_FP87,
996 .features[FEAT_1_ECX] =
997 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
998 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
999 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1000 .features[FEAT_8000_0001_EDX] =
1001 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1002 .features[FEAT_8000_0001_ECX] =
1003 CPUID_EXT3_LAHF_LM,
1004 .xlevel = 0x8000000A,
1005 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1008 .name = "SandyBridge",
1009 .level = 0xd,
1010 .vendor = CPUID_VENDOR_INTEL,
1011 .family = 6,
1012 .model = 42,
1013 .stepping = 1,
1014 .features[FEAT_1_EDX] =
1015 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1016 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1017 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1018 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1019 CPUID_DE | CPUID_FP87,
1020 .features[FEAT_1_ECX] =
1021 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1022 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1023 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1024 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1025 CPUID_EXT_SSE3,
1026 .features[FEAT_8000_0001_EDX] =
1027 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1028 CPUID_EXT2_SYSCALL,
1029 .features[FEAT_8000_0001_ECX] =
1030 CPUID_EXT3_LAHF_LM,
1031 .features[FEAT_XSAVE] =
1032 CPUID_XSAVE_XSAVEOPT,
1033 .xlevel = 0x8000000A,
1034 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1037 .name = "IvyBridge",
1038 .level = 0xd,
1039 .vendor = CPUID_VENDOR_INTEL,
1040 .family = 6,
1041 .model = 58,
1042 .stepping = 9,
1043 .features[FEAT_1_EDX] =
1044 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1045 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1046 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1047 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1048 CPUID_DE | CPUID_FP87,
1049 .features[FEAT_1_ECX] =
1050 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1051 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1052 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1053 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1054 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1055 .features[FEAT_7_0_EBX] =
1056 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1057 CPUID_7_0_EBX_ERMS,
1058 .features[FEAT_8000_0001_EDX] =
1059 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1060 CPUID_EXT2_SYSCALL,
1061 .features[FEAT_8000_0001_ECX] =
1062 CPUID_EXT3_LAHF_LM,
1063 .features[FEAT_XSAVE] =
1064 CPUID_XSAVE_XSAVEOPT,
1065 .xlevel = 0x8000000A,
1066 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1069 .name = "Haswell-noTSX",
1070 .level = 0xd,
1071 .vendor = CPUID_VENDOR_INTEL,
1072 .family = 6,
1073 .model = 60,
1074 .stepping = 1,
1075 .features[FEAT_1_EDX] =
1076 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1077 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1078 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1079 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1080 CPUID_DE | CPUID_FP87,
1081 .features[FEAT_1_ECX] =
1082 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1083 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1084 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1085 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1086 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1087 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1088 .features[FEAT_8000_0001_EDX] =
1089 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1090 CPUID_EXT2_SYSCALL,
1091 .features[FEAT_8000_0001_ECX] =
1092 CPUID_EXT3_LAHF_LM,
1093 .features[FEAT_7_0_EBX] =
1094 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1095 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1096 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1097 .features[FEAT_XSAVE] =
1098 CPUID_XSAVE_XSAVEOPT,
1099 .xlevel = 0x8000000A,
1100 .model_id = "Intel Core Processor (Haswell, no TSX)",
1101 }, {
1102 .name = "Haswell",
1103 .level = 0xd,
1104 .vendor = CPUID_VENDOR_INTEL,
1105 .family = 6,
1106 .model = 60,
1107 .stepping = 1,
1108 .features[FEAT_1_EDX] =
1109 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1110 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1111 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1112 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1113 CPUID_DE | CPUID_FP87,
1114 .features[FEAT_1_ECX] =
1115 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1116 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1117 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1118 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1119 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1120 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1121 .features[FEAT_8000_0001_EDX] =
1122 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1123 CPUID_EXT2_SYSCALL,
1124 .features[FEAT_8000_0001_ECX] =
1125 CPUID_EXT3_LAHF_LM,
1126 .features[FEAT_7_0_EBX] =
1127 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1128 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1129 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1130 CPUID_7_0_EBX_RTM,
1131 .features[FEAT_XSAVE] =
1132 CPUID_XSAVE_XSAVEOPT,
1133 .xlevel = 0x8000000A,
1134 .model_id = "Intel Core Processor (Haswell)",
1137 .name = "Broadwell-noTSX",
1138 .level = 0xd,
1139 .vendor = CPUID_VENDOR_INTEL,
1140 .family = 6,
1141 .model = 61,
1142 .stepping = 2,
1143 .features[FEAT_1_EDX] =
1144 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1145 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1146 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1147 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1148 CPUID_DE | CPUID_FP87,
1149 .features[FEAT_1_ECX] =
1150 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1151 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1152 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1153 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1154 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1155 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1156 .features[FEAT_8000_0001_EDX] =
1157 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1158 CPUID_EXT2_SYSCALL,
1159 .features[FEAT_8000_0001_ECX] =
1160 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1161 .features[FEAT_7_0_EBX] =
1162 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1163 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1164 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1165 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1166 CPUID_7_0_EBX_SMAP,
1167 .features[FEAT_XSAVE] =
1168 CPUID_XSAVE_XSAVEOPT,
1169 .xlevel = 0x8000000A,
1170 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1173 .name = "Broadwell",
1174 .level = 0xd,
1175 .vendor = CPUID_VENDOR_INTEL,
1176 .family = 6,
1177 .model = 61,
1178 .stepping = 2,
1179 .features[FEAT_1_EDX] =
1180 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1181 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1182 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1183 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1184 CPUID_DE | CPUID_FP87,
1185 .features[FEAT_1_ECX] =
1186 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1187 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1188 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1189 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1190 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1191 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1192 .features[FEAT_8000_0001_EDX] =
1193 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1194 CPUID_EXT2_SYSCALL,
1195 .features[FEAT_8000_0001_ECX] =
1196 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1197 .features[FEAT_7_0_EBX] =
1198 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1199 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1200 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1201 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1202 CPUID_7_0_EBX_SMAP,
1203 .features[FEAT_XSAVE] =
1204 CPUID_XSAVE_XSAVEOPT,
1205 .xlevel = 0x8000000A,
1206 .model_id = "Intel Core Processor (Broadwell)",
1209 .name = "Opteron_G1",
1210 .level = 5,
1211 .vendor = CPUID_VENDOR_AMD,
1212 .family = 15,
1213 .model = 6,
1214 .stepping = 1,
1215 .features[FEAT_1_EDX] =
1216 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1217 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1218 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1219 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1220 CPUID_DE | CPUID_FP87,
1221 .features[FEAT_1_ECX] =
1222 CPUID_EXT_SSE3,
1223 .features[FEAT_8000_0001_EDX] =
1224 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1225 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1226 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1227 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1228 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1229 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1230 .xlevel = 0x80000008,
1231 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1234 .name = "Opteron_G2",
1235 .level = 5,
1236 .vendor = CPUID_VENDOR_AMD,
1237 .family = 15,
1238 .model = 6,
1239 .stepping = 1,
1240 .features[FEAT_1_EDX] =
1241 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1242 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1243 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1244 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1245 CPUID_DE | CPUID_FP87,
1246 .features[FEAT_1_ECX] =
1247 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1248 .features[FEAT_8000_0001_EDX] =
1249 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1250 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1251 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1252 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1253 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1254 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1255 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1256 .features[FEAT_8000_0001_ECX] =
1257 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1258 .xlevel = 0x80000008,
1259 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1262 .name = "Opteron_G3",
1263 .level = 5,
1264 .vendor = CPUID_VENDOR_AMD,
1265 .family = 15,
1266 .model = 6,
1267 .stepping = 1,
1268 .features[FEAT_1_EDX] =
1269 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1270 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1271 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1272 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1273 CPUID_DE | CPUID_FP87,
1274 .features[FEAT_1_ECX] =
1275 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1276 CPUID_EXT_SSE3,
1277 .features[FEAT_8000_0001_EDX] =
1278 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1279 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1280 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1281 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1282 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1283 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1284 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1285 .features[FEAT_8000_0001_ECX] =
1286 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1287 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1288 .xlevel = 0x80000008,
1289 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1292 .name = "Opteron_G4",
1293 .level = 0xd,
1294 .vendor = CPUID_VENDOR_AMD,
1295 .family = 21,
1296 .model = 1,
1297 .stepping = 2,
1298 .features[FEAT_1_EDX] =
1299 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1300 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1301 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1302 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1303 CPUID_DE | CPUID_FP87,
1304 .features[FEAT_1_ECX] =
1305 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1306 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1307 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1308 CPUID_EXT_SSE3,
1309 .features[FEAT_8000_0001_EDX] =
1310 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1311 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1312 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1313 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1314 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1315 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1316 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1317 .features[FEAT_8000_0001_ECX] =
1318 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1319 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1320 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1321 CPUID_EXT3_LAHF_LM,
1322 /* no xsaveopt! */
1323 .xlevel = 0x8000001A,
1324 .model_id = "AMD Opteron 62xx class CPU",
1327 .name = "Opteron_G5",
1328 .level = 0xd,
1329 .vendor = CPUID_VENDOR_AMD,
1330 .family = 21,
1331 .model = 2,
1332 .stepping = 0,
1333 .features[FEAT_1_EDX] =
1334 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1335 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1336 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1337 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1338 CPUID_DE | CPUID_FP87,
1339 .features[FEAT_1_ECX] =
1340 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1341 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1342 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1343 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1344 .features[FEAT_8000_0001_EDX] =
1345 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1346 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1347 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1348 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1349 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1350 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1351 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1352 .features[FEAT_8000_0001_ECX] =
1353 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1354 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1355 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1356 CPUID_EXT3_LAHF_LM,
1357 /* no xsaveopt! */
1358 .xlevel = 0x8000001A,
1359 .model_id = "AMD Opteron 63xx class CPU",
1364 * x86_cpu_compat_set_features:
1365 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1366 * @w: Identifies the feature word to be changed.
1367 * @feat_add: Feature bits to be added to feature word
1368 * @feat_remove: Feature bits to be removed from feature word
1370 * Change CPU model feature bits for compatibility.
1372 * This function may be used by machine-type compatibility functions
1373 * to enable or disable feature bits on specific CPU models.
1375 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1376 uint32_t feat_add, uint32_t feat_remove)
1378 X86CPUDefinition *def;
1379 int i;
1380 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1381 def = &builtin_x86_defs[i];
1382 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1383 def->features[w] |= feat_add;
1384 def->features[w] &= ~feat_remove;
1389 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1390 bool migratable_only);
1392 #ifdef CONFIG_KVM
/* Fill @str with the host CPU's brand string, read from CPUID leaves
 * 0x80000002..0x80000004 (16 bytes per leaf, 48 bytes total).
 *
 * NOTE(review): @str must have room for at least 48 bytes; the brand
 * string normally contains its own NUL terminator within those 48
 * bytes, but that is a property of the host CPUID data — confirm
 * callers pass a sufficiently sized buffer.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        /* Registers are laid out in memory in EAX, EBX, ECX, EDX order */
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
/* CPU model definition for "-cpu host"; filled in from the host CPU's
 * CPUID data by host_x86_cpu_class_init(). */
static X86CPUDefinition host_cpudef;

static Property host_x86_cpu_properties[] = {
    /* "migratable": when true, only features safe for live migration
     * are exposed (see x86_cpu_get_supported_feature_word). */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_END_OF_LIST()
};
/* class_init for the "host" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    /* "-cpu host" is meaningless without KVM */
    xcc->kvm_required = true;

    /* Leaf 0: vendor string comes back in EBX, EDX, ECX order */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* Leaf 1 EAX: combine base and extended family/model fields */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;
    host_cpudef.cache_info_passthrough = true;

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
}
/* instance_init for the "host" CPU model: query KVM for the maximum
 * supported CPUID levels. Requires KVM to already be initialized. */
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    assert(kvm_enabled());

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* Maximum basic, extended, and Centaur CPUID leaves supported by KVM */
    env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
    env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
    env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
/* QOM type registration for the KVM-only "host" CPU model */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
1475 #endif
1477 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1479 FeatureWordInfo *f = &feature_word_info[w];
1480 int i;
1482 for (i = 0; i < 32; ++i) {
1483 if (1 << i & mask) {
1484 const char *reg = get_register_name_32(f->cpuid_reg);
1485 assert(reg);
1486 fprintf(stderr, "warning: %s doesn't support requested feature: "
1487 "CPUID.%02XH:%s%s%s [bit %d]\n",
1488 kvm_enabled() ? "host" : "TCG",
1489 f->cpuid_eax, reg,
1490 f->feat_names[i] ? "." : "",
1491 f->feat_names[i] ? f->feat_names[i] : "", i);
1496 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1497 const char *name, Error **errp)
1499 X86CPU *cpu = X86_CPU(obj);
1500 CPUX86State *env = &cpu->env;
1501 int64_t value;
1503 value = (env->cpuid_version >> 8) & 0xf;
1504 if (value == 0xf) {
1505 value += (env->cpuid_version >> 20) & 0xff;
1507 visit_type_int(v, &value, name, errp);
/* QOM setter for "family": encode @value into the base family field
 * (bits 11..8) and, for values above 0xf, the extended family field
 * (bits 27..20) of env->cpuid_version. Range: 0..0x10e (0xff + 0xf). */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
                                         const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    /* Clear both family fields before re-encoding */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        /* Base family saturates at 0xf; remainder goes to extended family */
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
1539 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1540 const char *name, Error **errp)
1542 X86CPU *cpu = X86_CPU(obj);
1543 CPUX86State *env = &cpu->env;
1544 int64_t value;
1546 value = (env->cpuid_version >> 4) & 0xf;
1547 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1548 visit_type_int(v, &value, name, errp);
/* QOM setter for "model": split @value into the base model field
 * (bits 7..4) and extended model field (bits 19..16). Range: 0..0xff. */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
                                        const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    /* Low nibble -> base model field, high nibble -> extended model field */
    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
1576 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1577 void *opaque, const char *name,
1578 Error **errp)
1580 X86CPU *cpu = X86_CPU(obj);
1581 CPUX86State *env = &cpu->env;
1582 int64_t value;
1584 value = env->cpuid_version & 0xf;
1585 visit_type_int(v, &value, name, errp);
/* QOM setter for "stepping": store @value in the low nibble of
 * env->cpuid_version. Range: 0..0xf. */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           void *opaque, const char *name,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
1614 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1616 X86CPU *cpu = X86_CPU(obj);
1617 CPUX86State *env = &cpu->env;
1618 char *value;
1620 value = g_malloc(CPUID_VENDOR_SZ + 1);
1621 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1622 env->cpuid_vendor3);
1623 return value;
/* QOM setter for "vendor": pack a 12-character vendor string into the
 * three little-endian CPUID vendor registers (4 bytes each). Rejects
 * strings that are not exactly CPUID_VENDOR_SZ characters long. */
static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
                  "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        /* Byte i of each 4-char group goes to bits (8*i)..(8*i+7) */
        env->cpuid_vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}
/* QOM getter for "model-id": unpack the 48-byte CPUID brand string from
 * the little-endian 32-bit words in env->cpuid_model into a newly
 * allocated NUL-terminated string. Caller owns the returned buffer. */
static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = g_malloc(48 + 1);
    for (i = 0; i < 48; i++) {
        /* Byte i lives in word i/4, at byte position i%4 */
        value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    value[48] = '\0';
    return value;
}
/* QOM setter for "model-id": pack @model_id into the 48-byte (12-word)
 * env->cpuid_model array, NUL-padding beyond the string's length.
 * A NULL @model_id is treated as the empty string. */
static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
                                   Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int c, len, i;

    if (model_id == NULL) {
        model_id = "";
    }
    len = strlen(model_id);
    /* 48 bytes == 12 * sizeof(uint32_t), the full cpuid_model array */
    memset(env->cpuid_model, 0, 48);
    for (i = 0; i < 48; i++) {
        if (i >= len) {
            c = '\0';
        } else {
            c = (uint8_t)model_id[i];
        }
        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
    }
}
1686 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1687 const char *name, Error **errp)
1689 X86CPU *cpu = X86_CPU(obj);
1690 int64_t value;
1692 value = cpu->env.tsc_khz * 1000;
1693 visit_type_int(v, &value, name, errp);
/* QOM setter for "tsc-frequency": accepts a value in Hz and stores it
 * internally in kHz (sub-kHz precision is truncated). */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = value / 1000;
}
1719 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1720 const char *name, Error **errp)
1722 X86CPU *cpu = X86_CPU(obj);
1723 int64_t value = cpu->apic_id;
1725 visit_type_int(v, &value, name, errp);
/* QOM setter for "apic-id": validates range (0..UINT32_MAX), rejects
 * changes after the device is realized, and rejects IDs already in use
 * by another CPU. */
static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
                                  const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    DeviceState *dev = DEVICE(obj);
    const int64_t min = 0;
    const int64_t max = UINT32_MAX;
    Error *error = NULL;
    int64_t value;

    /* The APIC ID is fixed once the CPU has been realized */
    if (dev->realized) {
        error_setg(errp, "Attempt to set property '%s' on '%s' after "
                   "it was realized", name, object_get_typename(obj));
        return;
    }

    visit_type_int(v, &value, name, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
                   object_get_typename(obj), name, value, min, max);
        return;
    }

    /* Setting the same ID again is allowed; clashing with another CPU is not */
    if ((value != cpu->apic_id) && cpu_exists(value)) {
        error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
        return;
    }
    cpu->apic_id = value;
}
/* Generic getter for "feature-words" and "filtered-features" properties
 *
 * @opaque points at the feature-word array to expose (env->features or
 * cpu->filtered_features). Builds an X86CPUFeatureWordInfoList on the
 * stack — entries only need to live for the duration of the visit. */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
                                      const char *name, Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    Error *err = NULL;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
    error_propagate(errp, err);
}
1793 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1794 const char *name, Error **errp)
1796 X86CPU *cpu = X86_CPU(obj);
1797 int64_t value = cpu->hyperv_spinlock_attempts;
1799 visit_type_int(v, &value, name, errp);
/* QOM setter for "hv-spinlocks": Hyper-V spinlock retry count.
 * Valid range is 0xFFF..UINT_MAX (the Hyper-V spec minimum is 0xFFF). */
static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
                                 const char *name, Error **errp)
{
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}
/* qdev property type backing the "hv-spinlocks" property */
static PropertyInfo qdev_prop_spinlocks = {
    .name  = "int",
    .get   = x86_get_hv_spinlocks,
    .set   = x86_set_hv_spinlocks,
};
/* Normalize a feature option name in place: replace every '_' with '-',
 * since QOM property names use '-' rather than '_'.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = s; *p != '\0'; p++) {
        if (*p == '_') {
            *p = '-';
        }
    }
}
1843 /* Parse "+feature,-feature,feature=foo" CPU feature string
1845 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1846 Error **errp)
1848 X86CPU *cpu = X86_CPU(cs);
1849 char *featurestr; /* Single 'key=value" string being parsed */
1850 FeatureWord w;
1851 /* Features to be added */
1852 FeatureWordArray plus_features = { 0 };
1853 /* Features to be removed */
1854 FeatureWordArray minus_features = { 0 };
1855 uint32_t numvalue;
1856 CPUX86State *env = &cpu->env;
1857 Error *local_err = NULL;
1859 featurestr = features ? strtok(features, ",") : NULL;
1861 while (featurestr) {
1862 char *val;
1863 if (featurestr[0] == '+') {
1864 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1865 } else if (featurestr[0] == '-') {
1866 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1867 } else if ((val = strchr(featurestr, '='))) {
1868 *val = 0; val++;
1869 feat2prop(featurestr);
1870 if (!strcmp(featurestr, "xlevel")) {
1871 char *err;
1872 char num[32];
1874 numvalue = strtoul(val, &err, 0);
1875 if (!*val || *err) {
1876 error_setg(errp, "bad numerical value %s", val);
1877 return;
1879 if (numvalue < 0x80000000) {
1880 error_report("xlevel value shall always be >= 0x80000000"
1881 ", fixup will be removed in future versions");
1882 numvalue += 0x80000000;
1884 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1885 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1886 } else if (!strcmp(featurestr, "tsc-freq")) {
1887 int64_t tsc_freq;
1888 char *err;
1889 char num[32];
1891 tsc_freq = strtosz_suffix_unit(val, &err,
1892 STRTOSZ_DEFSUFFIX_B, 1000);
1893 if (tsc_freq < 0 || *err) {
1894 error_setg(errp, "bad numerical value %s", val);
1895 return;
1897 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1898 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1899 &local_err);
1900 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1901 char *err;
1902 const int min = 0xFFF;
1903 char num[32];
1904 numvalue = strtoul(val, &err, 0);
1905 if (!*val || *err) {
1906 error_setg(errp, "bad numerical value %s", val);
1907 return;
1909 if (numvalue < min) {
1910 error_report("hv-spinlocks value shall always be >= 0x%x"
1911 ", fixup will be removed in future versions",
1912 min);
1913 numvalue = min;
1915 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1916 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1917 } else {
1918 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1920 } else {
1921 feat2prop(featurestr);
1922 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1924 if (local_err) {
1925 error_propagate(errp, local_err);
1926 return;
1928 featurestr = strtok(NULL, ",");
1931 if (cpu->host_features) {
1932 for (w = 0; w < FEATURE_WORDS; w++) {
1933 env->features[w] =
1934 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1938 for (w = 0; w < FEATURE_WORDS; w++) {
1939 env->features[w] |= plus_features[w];
1940 env->features[w] &= ~minus_features[w];
/* Print all cpuid feature names in featureset
 *
 * @featureset is a 32-entry array of names indexed by bit position;
 * NULL entries (unnamed bits) are skipped. Names are space-separated
 * with no trailing newline. */
static void listflags(FILE *f, fprintf_function print, const char **featureset)
{
    int bit;
    bool first = true;

    for (bit = 0; bit < 32; bit++) {
        if (featureset[bit]) {
            print(f, "%s%s", first ? "" : " ", featureset[bit]);
            first = false;
        }
    }
}
/* generate CPU information.
 *
 * Lists every built-in CPU model with its model-id string, the KVM-only
 * "host" model when built with KVM, and then all recognized CPUID flag
 * names grouped by feature word. Used by "-cpu ?" / "-cpu help". */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    X86CPUDefinition *def;
    char buf[256];
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        def = &builtin_x86_defs[i];
        snprintf(buf, sizeof(buf), "%s", def->name);
        (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
    }
#ifdef CONFIG_KVM
    (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
                   "KVM processor with all supported host features "
                   "(only available in KVM mode)");
#endif

    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];

        (*cpu_fprintf)(f, "  ");
        listflags(f, cpu_fprintf, fw->feat_names);
        (*cpu_fprintf)(f, "\n");
    }
}
/* QMP query-cpu-definitions implementation: return the list of built-in
 * CPU model names. Caller owns the returned list (QAPI-allocated). */
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    X86CPUDefinition *def;
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        CpuDefinitionInfoList *entry;
        CpuDefinitionInfo *info;

        def = &builtin_x86_defs[i];
        info = g_malloc0(sizeof(*info));
        info->name = g_strdup(def->name);

        /* Prepend: resulting list is in reverse table order */
        entry = g_malloc0(sizeof(*entry));
        entry->value = info;
        entry->next = cpu_list;
        cpu_list = entry;
    }

    return cpu_list;
}
/* Return the feature bits of word @w that the current accelerator can
 * actually provide. With KVM this queries the kernel; with TCG it uses
 * the static tcg_features mask; any other accelerator reports "all"
 * (~0) since nothing is known. With @migratable_only, bits that cannot
 * be live-migrated are masked out as well. */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                                    wi->cpuid_ecx,
                                                    wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
/*
 * Filters CPU feature words based on host availability of each feature.
 *
 * Masks env->features down to what the accelerator supports and records
 * the removed bits in cpu->filtered_features. Warnings are printed only
 * when "check" or "enforce" was requested.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, cpu->migratable);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        /* Bits that were requested but had to be dropped */
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            if (cpu->check_cpuid || cpu->enforce_cpuid) {
                report_unavailable_features(w, cpu->filtered_features[w]);
            }
            rv = 1;
        }
    }

    return rv;
}
/* Load data from X86CPUDefinition
 *
 * Copies the static model definition @def into @cpu via QOM properties
 * (so property setters run), then applies accelerator-specific
 * adjustments: KVM default features and the hypervisor bit, plus the
 * host's vendor string under KVM (see comment below).  Errors from the
 * individual property sets accumulate in @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    object_property_set_int(OBJECT(cpu), def->level, "level", errp);
    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
    cpu->cache_info_passthrough = def->cache_info_passthrough;
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        /* Apply the KVM-mode default feature additions/removals. */
        FeatureWord w;
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] |= kvm_default_features[w];
            env->features[w] &= ~kvm_default_unset_features[w];
        }
    }

    /* Always advertise that the guest runs under a hypervisor. */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
}
2110 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2112 X86CPU *cpu = NULL;
2113 X86CPUClass *xcc;
2114 ObjectClass *oc;
2115 gchar **model_pieces;
2116 char *name, *features;
2117 Error *error = NULL;
2119 model_pieces = g_strsplit(cpu_model, ",", 2);
2120 if (!model_pieces[0]) {
2121 error_setg(&error, "Invalid/empty CPU model name");
2122 goto out;
2124 name = model_pieces[0];
2125 features = model_pieces[1];
2127 oc = x86_cpu_class_by_name(name);
2128 if (oc == NULL) {
2129 error_setg(&error, "Unable to find CPU definition: %s", name);
2130 goto out;
2132 xcc = X86_CPU_CLASS(oc);
2134 if (xcc->kvm_required && !kvm_enabled()) {
2135 error_setg(&error, "CPU model '%s' requires KVM", name);
2136 goto out;
2139 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2141 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2142 if (error) {
2143 goto out;
2146 out:
2147 if (error != NULL) {
2148 error_propagate(errp, error);
2149 if (cpu) {
2150 object_unref(OBJECT(cpu));
2151 cpu = NULL;
2154 g_strfreev(model_pieces);
2155 return cpu;
2158 X86CPU *cpu_x86_init(const char *cpu_model)
2160 Error *error = NULL;
2161 X86CPU *cpu;
2163 cpu = cpu_x86_create(cpu_model, &error);
2164 if (error) {
2165 goto out;
2168 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2170 out:
2171 if (error) {
2172 error_report_err(error);
2173 if (cpu != NULL) {
2174 object_unref(OBJECT(cpu));
2175 cpu = NULL;
2178 return cpu;
2181 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2183 X86CPUDefinition *cpudef = data;
2184 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2186 xcc->cpu_def = cpudef;
2189 static void x86_register_cpudef_type(X86CPUDefinition *def)
2191 char *typename = x86_cpu_type_name(def->name);
2192 TypeInfo ti = {
2193 .name = typename,
2194 .parent = TYPE_X86_CPU,
2195 .class_init = x86_cpu_cpudef_class_init,
2196 .class_data = def,
2199 type_register(&ti);
2200 g_free(typename);
#if !defined(CONFIG_USER_ONLY)

/* Remove the APIC bit from the CPUID[1].EDX feature word, so guests see
 * a CPU without a local APIC.
 */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */
2212 /* Initialize list of CPU models, filling some non-static fields if necessary
2214 void x86_cpudef_setup(void)
2216 int i, j;
2217 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2219 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2220 X86CPUDefinition *def = &builtin_x86_defs[i];
2222 /* Look for specific "cpudef" models that */
2223 /* have the QEMU version in .model_id */
2224 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2225 if (strcmp(model_with_versions[j], def->name) == 0) {
2226 pstrcpy(def->model_id, sizeof(def->model_id),
2227 "QEMU Virtual CPU version ");
2228 pstrcat(def->model_id, sizeof(def->model_id),
2229 qemu_get_version());
2230 break;
2236 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2237 uint32_t *eax, uint32_t *ebx,
2238 uint32_t *ecx, uint32_t *edx)
2240 X86CPU *cpu = x86_env_get_cpu(env);
2241 CPUState *cs = CPU(cpu);
2243 /* test if maximum index reached */
2244 if (index & 0x80000000) {
2245 if (index > env->cpuid_xlevel) {
2246 if (env->cpuid_xlevel2 > 0) {
2247 /* Handle the Centaur's CPUID instruction. */
2248 if (index > env->cpuid_xlevel2) {
2249 index = env->cpuid_xlevel2;
2250 } else if (index < 0xC0000000) {
2251 index = env->cpuid_xlevel;
2253 } else {
2254 /* Intel documentation states that invalid EAX input will
2255 * return the same information as EAX=cpuid_level
2256 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2258 index = env->cpuid_level;
2261 } else {
2262 if (index > env->cpuid_level)
2263 index = env->cpuid_level;
2266 switch(index) {
2267 case 0:
2268 *eax = env->cpuid_level;
2269 *ebx = env->cpuid_vendor1;
2270 *edx = env->cpuid_vendor2;
2271 *ecx = env->cpuid_vendor3;
2272 break;
2273 case 1:
2274 *eax = env->cpuid_version;
2275 *ebx = (cpu->apic_id << 24) |
2276 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2277 *ecx = env->features[FEAT_1_ECX];
2278 *edx = env->features[FEAT_1_EDX];
2279 if (cs->nr_cores * cs->nr_threads > 1) {
2280 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2281 *edx |= 1 << 28; /* HTT bit */
2283 break;
2284 case 2:
2285 /* cache info: needed for Pentium Pro compatibility */
2286 if (cpu->cache_info_passthrough) {
2287 host_cpuid(index, 0, eax, ebx, ecx, edx);
2288 break;
2290 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2291 *ebx = 0;
2292 *ecx = 0;
2293 *edx = (L1D_DESCRIPTOR << 16) | \
2294 (L1I_DESCRIPTOR << 8) | \
2295 (L2_DESCRIPTOR);
2296 break;
2297 case 4:
2298 /* cache info: needed for Core compatibility */
2299 if (cpu->cache_info_passthrough) {
2300 host_cpuid(index, count, eax, ebx, ecx, edx);
2301 *eax &= ~0xFC000000;
2302 } else {
2303 *eax = 0;
2304 switch (count) {
2305 case 0: /* L1 dcache info */
2306 *eax |= CPUID_4_TYPE_DCACHE | \
2307 CPUID_4_LEVEL(1) | \
2308 CPUID_4_SELF_INIT_LEVEL;
2309 *ebx = (L1D_LINE_SIZE - 1) | \
2310 ((L1D_PARTITIONS - 1) << 12) | \
2311 ((L1D_ASSOCIATIVITY - 1) << 22);
2312 *ecx = L1D_SETS - 1;
2313 *edx = CPUID_4_NO_INVD_SHARING;
2314 break;
2315 case 1: /* L1 icache info */
2316 *eax |= CPUID_4_TYPE_ICACHE | \
2317 CPUID_4_LEVEL(1) | \
2318 CPUID_4_SELF_INIT_LEVEL;
2319 *ebx = (L1I_LINE_SIZE - 1) | \
2320 ((L1I_PARTITIONS - 1) << 12) | \
2321 ((L1I_ASSOCIATIVITY - 1) << 22);
2322 *ecx = L1I_SETS - 1;
2323 *edx = CPUID_4_NO_INVD_SHARING;
2324 break;
2325 case 2: /* L2 cache info */
2326 *eax |= CPUID_4_TYPE_UNIFIED | \
2327 CPUID_4_LEVEL(2) | \
2328 CPUID_4_SELF_INIT_LEVEL;
2329 if (cs->nr_threads > 1) {
2330 *eax |= (cs->nr_threads - 1) << 14;
2332 *ebx = (L2_LINE_SIZE - 1) | \
2333 ((L2_PARTITIONS - 1) << 12) | \
2334 ((L2_ASSOCIATIVITY - 1) << 22);
2335 *ecx = L2_SETS - 1;
2336 *edx = CPUID_4_NO_INVD_SHARING;
2337 break;
2338 default: /* end of info */
2339 *eax = 0;
2340 *ebx = 0;
2341 *ecx = 0;
2342 *edx = 0;
2343 break;
2347 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2348 if ((*eax & 31) && cs->nr_cores > 1) {
2349 *eax |= (cs->nr_cores - 1) << 26;
2351 break;
2352 case 5:
2353 /* mwait info: needed for Core compatibility */
2354 *eax = 0; /* Smallest monitor-line size in bytes */
2355 *ebx = 0; /* Largest monitor-line size in bytes */
2356 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2357 *edx = 0;
2358 break;
2359 case 6:
2360 /* Thermal and Power Leaf */
2361 *eax = 0;
2362 *ebx = 0;
2363 *ecx = 0;
2364 *edx = 0;
2365 break;
2366 case 7:
2367 /* Structured Extended Feature Flags Enumeration Leaf */
2368 if (count == 0) {
2369 *eax = 0; /* Maximum ECX value for sub-leaves */
2370 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2371 *ecx = 0; /* Reserved */
2372 *edx = 0; /* Reserved */
2373 } else {
2374 *eax = 0;
2375 *ebx = 0;
2376 *ecx = 0;
2377 *edx = 0;
2379 break;
2380 case 9:
2381 /* Direct Cache Access Information Leaf */
2382 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2383 *ebx = 0;
2384 *ecx = 0;
2385 *edx = 0;
2386 break;
2387 case 0xA:
2388 /* Architectural Performance Monitoring Leaf */
2389 if (kvm_enabled() && cpu->enable_pmu) {
2390 KVMState *s = cs->kvm_state;
2392 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2393 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2394 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2395 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2396 } else {
2397 *eax = 0;
2398 *ebx = 0;
2399 *ecx = 0;
2400 *edx = 0;
2402 break;
2403 case 0xD: {
2404 KVMState *s = cs->kvm_state;
2405 uint64_t kvm_mask;
2406 int i;
2408 /* Processor Extended State */
2409 *eax = 0;
2410 *ebx = 0;
2411 *ecx = 0;
2412 *edx = 0;
2413 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2414 break;
2416 kvm_mask =
2417 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2418 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2420 if (count == 0) {
2421 *ecx = 0x240;
2422 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2423 const ExtSaveArea *esa = &ext_save_areas[i];
2424 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2425 (kvm_mask & (1 << i)) != 0) {
2426 if (i < 32) {
2427 *eax |= 1 << i;
2428 } else {
2429 *edx |= 1 << (i - 32);
2431 *ecx = MAX(*ecx, esa->offset + esa->size);
2434 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2435 *ebx = *ecx;
2436 } else if (count == 1) {
2437 *eax = env->features[FEAT_XSAVE];
2438 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2439 const ExtSaveArea *esa = &ext_save_areas[count];
2440 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2441 (kvm_mask & (1 << count)) != 0) {
2442 *eax = esa->size;
2443 *ebx = esa->offset;
2446 break;
2448 case 0x80000000:
2449 *eax = env->cpuid_xlevel;
2450 *ebx = env->cpuid_vendor1;
2451 *edx = env->cpuid_vendor2;
2452 *ecx = env->cpuid_vendor3;
2453 break;
2454 case 0x80000001:
2455 *eax = env->cpuid_version;
2456 *ebx = 0;
2457 *ecx = env->features[FEAT_8000_0001_ECX];
2458 *edx = env->features[FEAT_8000_0001_EDX];
2460 /* The Linux kernel checks for the CMPLegacy bit and
2461 * discards multiple thread information if it is set.
2462 * So dont set it here for Intel to make Linux guests happy.
2464 if (cs->nr_cores * cs->nr_threads > 1) {
2465 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2466 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2467 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2468 *ecx |= 1 << 1; /* CmpLegacy bit */
2471 break;
2472 case 0x80000002:
2473 case 0x80000003:
2474 case 0x80000004:
2475 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2476 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2477 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2478 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2479 break;
2480 case 0x80000005:
2481 /* cache info (L1 cache) */
2482 if (cpu->cache_info_passthrough) {
2483 host_cpuid(index, 0, eax, ebx, ecx, edx);
2484 break;
2486 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2487 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2488 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2489 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2490 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2491 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2492 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2493 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2494 break;
2495 case 0x80000006:
2496 /* cache info (L2 cache) */
2497 if (cpu->cache_info_passthrough) {
2498 host_cpuid(index, 0, eax, ebx, ecx, edx);
2499 break;
2501 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2502 (L2_DTLB_2M_ENTRIES << 16) | \
2503 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2504 (L2_ITLB_2M_ENTRIES);
2505 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2506 (L2_DTLB_4K_ENTRIES << 16) | \
2507 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2508 (L2_ITLB_4K_ENTRIES);
2509 *ecx = (L2_SIZE_KB_AMD << 16) | \
2510 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2511 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2512 *edx = ((L3_SIZE_KB/512) << 18) | \
2513 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2514 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2515 break;
2516 case 0x80000007:
2517 *eax = 0;
2518 *ebx = 0;
2519 *ecx = 0;
2520 *edx = env->features[FEAT_8000_0007_EDX];
2521 break;
2522 case 0x80000008:
2523 /* virtual & phys address size in low 2 bytes. */
2524 /* XXX: This value must match the one used in the MMU code. */
2525 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2526 /* 64 bit processor */
2527 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2528 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2529 } else {
2530 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2531 *eax = 0x00000024; /* 36 bits physical */
2532 } else {
2533 *eax = 0x00000020; /* 32 bits physical */
2536 *ebx = 0;
2537 *ecx = 0;
2538 *edx = 0;
2539 if (cs->nr_cores * cs->nr_threads > 1) {
2540 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2542 break;
2543 case 0x8000000A:
2544 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2545 *eax = 0x00000001; /* SVM Revision */
2546 *ebx = 0x00000010; /* nr of ASIDs */
2547 *ecx = 0;
2548 *edx = env->features[FEAT_SVM]; /* optional features */
2549 } else {
2550 *eax = 0;
2551 *ebx = 0;
2552 *ecx = 0;
2553 *edx = 0;
2555 break;
2556 case 0xC0000000:
2557 *eax = env->cpuid_xlevel2;
2558 *ebx = 0;
2559 *ecx = 0;
2560 *edx = 0;
2561 break;
2562 case 0xC0000001:
2563 /* Support for VIA CPU's CPUID instruction */
2564 *eax = env->cpuid_version;
2565 *ebx = 0;
2566 *ecx = 0;
2567 *edx = env->features[FEAT_C000_0001_EDX];
2568 break;
2569 case 0xC0000002:
2570 case 0xC0000003:
2571 case 0xC0000004:
2572 /* Reserved for the future, and now filled with zero */
2573 *eax = 0;
2574 *ebx = 0;
2575 *ecx = 0;
2576 *edx = 0;
2577 break;
2578 default:
2579 /* reserved values: zero */
2580 *eax = 0;
2581 *ebx = 0;
2582 *ecx = 0;
2583 *edx = 0;
2584 break;
/* CPUClass::reset()
 *
 * Bring the vCPU to x86 power-on/reset state: zero the dynamic part of
 * CPUX86State, re-arm segment/descriptor registers, FPU/SSE state,
 * debug registers, MTRRs, and (system mode) BSP/halt status.
 * NOTE(review): statement order matters here (e.g. memset before the
 * field writes, parent_reset first) — do not reorder.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    xcc->parent_reset(s);

    /* Zero everything up to (not including) the CPUID model fields,
     * which must survive reset. */
    memset(env, 0, offsetof(CPUX86State, cpuid_level));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010); /* architectural CR0 reset value */
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 + EIP 0xfff0 below = reset vector 0xfffffff0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    env->xstate_bv = XSTATE_FP | XSTATE_SSE;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    env->xcr0 = 1;

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* APs start halted, waiting for INIT/SIPI. */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2688 #ifndef CONFIG_USER_ONLY
2689 bool cpu_is_bsp(X86CPU *cpu)
2691 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2694 /* TODO: remove me, when reset over QOM tree is implemented */
2695 static void x86_cpu_machine_reset_cb(void *opaque)
2697 X86CPU *cpu = opaque;
2698 cpu_reset(CPU(cpu));
2700 #endif
2702 static void mce_init(X86CPU *cpu)
2704 CPUX86State *cenv = &cpu->env;
2705 unsigned int bank;
2707 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2708 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2709 (CPUID_MCE | CPUID_MCA)) {
2710 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2711 cenv->mcg_ctl = ~(uint64_t)0;
2712 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2713 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
#ifndef CONFIG_USER_ONLY
/* Create (but do not realize) the local APIC child device for @cpu,
 * choosing the in-kernel/Xen/emulated APIC implementation to match the
 * active accelerator.  Sets @errp when the device cannot be created.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    DeviceState *dev = DEVICE(cpu);
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_irqchip_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
    if (cpu->apic_state == NULL) {
        error_setg(errp, "APIC device '%s' could not be created", apic_type);
        return;
    }

    object_property_add_child(OBJECT(cpu), "apic",
                              OBJECT(cpu->apic_state), NULL);
    qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
}

/* Realize the APIC created above; a no-op when no APIC was created. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    if (cpu->apic_state == NULL) {
        return;
    }

    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);
}
#else
/* User mode has no APIC device; keep the call site unconditional. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif
/* Vendor tests on the three CPUID[0] vendor-string words. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)

/* DeviceClass::realize for X86CPU: final feature fix-ups and filtering,
 * APIC creation, MCE init, vcpu creation, then chains to the parent
 * realize.  Errors are propagated through @errp.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (cpu->apic_id < 0) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /* CPUID[7] features require cpuid_level >= 7 to be visible. */
    if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
        env->cpuid_level = 7;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* With "enforce", any host-unsupported feature is a hard error;
     * otherwise unsupported bits were already silently dropped. */
    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);
    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);
out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
/* QOM instance init: register the X86CPU properties, load the model
 * definition from the class, and perform one-time TCG table init.
 * Runs before properties are set and before realize.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    static int inited;   /* guards the one-time TCG init below */

    cs->env_ptr = env;
    cpu_exec_init(env);

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "apic-id", "int",
                        x86_cpuid_get_apic_id,
                        x86_cpuid_set_apic_id, NULL, NULL, NULL);
    /* Read-only introspection of the effective/filtered feature words. */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

#ifndef CONFIG_USER_ONLY
    /* Any code creating new X86CPU objects have to set apic-id explicitly */
    cpu->apic_id = -1;
#endif

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);

    /* init various static tables used in TCG mode */
    if (tcg_enabled() && !inited) {
        inited = 1;
        optimize_flags_init();
    }
}
2899 static int64_t x86_cpu_get_arch_id(CPUState *cs)
2901 X86CPU *cpu = X86_CPU(cs);
2903 return cpu->apic_id;
2906 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
2908 X86CPU *cpu = X86_CPU(cs);
2910 return cpu->env.cr[0] & CR0_PG_MASK;
2913 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
2915 X86CPU *cpu = X86_CPU(cs);
2917 cpu->env.eip = value;
2920 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
2922 X86CPU *cpu = X86_CPU(cs);
2924 cpu->env.eip = tb->pc - tb->cs_base;
/* CPUClass::has_work — true when an interrupt condition can make the
 * CPU run: a hard interrupt with EFLAGS.IF set, or NMI/INIT/SIPI/MCE
 * which are not gated by IF.  As a side effect, a pending APIC poll
 * request is serviced (system mode only).
 */
static bool x86_cpu_has_work(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        apic_poll_irq(cpu->apic_state);
        cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
    }
#endif

    return ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) ||
           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
                                     CPU_INTERRUPT_INIT |
                                     CPU_INTERRUPT_SIPI |
                                     CPU_INTERRUPT_MCE));
}
/* qdev properties of every X86CPU: PMU/Hyper-V enlightenments, CPUID
 * check/enforce policy, KVM signature exposure, and the CPUID level
 * overrides.  See the corresponding X86CPU/env fields for semantics.
 */
static Property x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
    DEFINE_PROP_END_OF_LIST()
};
/* Class init for the abstract TYPE_X86_CPU base class: wires all CPU
 * and device virtual methods to their x86 implementations, saving the
 * parent realize/reset so the x86 versions can chain to them.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->bus_type = TYPE_ICC_BUS;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
}
/* The abstract base type; concrete CPU models are registered as
 * subclasses by x86_register_cpudef_type().
 */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
/* Module init: register the abstract base type, one subtype per
 * built-in model, and (KVM builds) the "host" passthrough CPU type.
 */
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_type(&builtin_x86_defs[i]);
    }
#ifdef CONFIG_KVM
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)